
/drivers/net/ifb.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets (see the usage sketch below).

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
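/*
 * Usage sketch (illustrative only; the interface names eth0/ifb0 and the
 * catch-all u32 match are assumptions, not part of this driver):
 *
 *	ip link set ifb0 up
 *	tc qdisc add dev ifb0 root sfq
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev ifb0
 *
 * Traffic arriving on eth0's ingress hook is redirected to ifb0, queued
 * through the qdisc attached there, and then reinjected by this driver.
 */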

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT    32
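/* Per-device state.  ifb_xmit() appends redirected packets to rq; the
 * tasklet splices rq onto tq under the tx lock and drains tq outside
 * the transmit path. */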
struct ifb_private {
	struct tasklet_struct   ifb_tasklet;
	int     tasklet_pending;
	struct sk_buff_head     rq;
	struct sk_buff_head     tq;
};

static int numifbs = 2;

static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

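/* Deferred work: refill tq from rq if needed, then hand each packet back
 * to its original device -- dev_queue_xmit() for packets grabbed at
 * egress, netif_receive_skb() for packets grabbed at ingress. */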
static void ri_tasklet(unsigned long dev)
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
};

#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->destructor = free_netdev;
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->vlan_features |= IFB_FEATURES;

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	random_ether_addr(dev->dev_addr);
}

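/* Queue a packet redirected to this device.  Packets must carry a tc_verd
 * direction (AT_INGRESS or AT_EGRESS) and a non-zero skb_iif, otherwise
 * they are counted as rx_dropped and freed.  The queue is stopped once rq
 * reaches tx_queue_len; actual delivery is deferred to ri_tasklet(). */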
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 from = G_TC_FROM(skb->tc_verd);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
		dev_kfree_skb(skb);
		stats->rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
		netif_stop_queue(dev);
	}

	__skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

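/* ndo_stop: kill the tasklet and drop anything still queued. */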
static int ifb_close(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_kill(&dp->ifb_tasklet);
	netif_stop_queue(dev);
	__skb_queue_purge(&dp->rq);
	__skb_queue_purge(&dp->tq);
	return 0;
}

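/* ndo_open: set up the queues and tasklet, then allow transmissions. */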
static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	__skb_queue_head_init(&dp->rq);
	__skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}

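/* rtnl_link validate: if a MAC address is supplied, it must be ETH_ALEN
 * bytes long and a valid (non-zero, non-multicast) ethernet address. */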
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

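/* Allocate and register one ifb device; the caller holds the RTNL lock. */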
static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
				 "ifb%d", ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

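/* Module init: register the rtnl link ops and create the initial devices
 * (two by default, or as many as the numifbs parameter requests); on any
 * failure the link ops are unregistered again. */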
static int __init ifb_init_module(void)
{
	int i, err;

	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);

	for (i = 0; i < numifbs && !err; i++)
		err = ifb_init_one(i);
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);
	rtnl_unlock();

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");