
/drivers/usb/gadget/u_ether.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
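
/*
 * Typical call flow (a sketch, inferred from the exported functions
 * below): a gadget driver calls gether_setup() once at bind time to
 * register the net_device; the function driver calls gether_connect()
 * when the host activates the data interface and gether_disconnect()
 * when it goes away; gether_cleanup() tears everything down at unbind.
 */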

#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
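
/*
 * Worked example (sketch): with qmult at its default of 5, a dual-speed
 * gadget running at high speed gets qlen() = 5 * DEFAULT_QLEN = 10
 * requests per direction; at full speed it falls back to 2.
 */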

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}
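
/*
 * Example (sketch): with the standard <linux/if_ether.h> values, ETH_HLEN
 * is 14 and ETH_FRAME_LEN is 1514, so ueth_change_mtu() accepts 15..1514,
 * and only while no USB link is active; "ip link set usb0 mtu 1400"
 * succeeds before the link comes up and returns -EBUSY afterwards.
 */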

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
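	/* Worked example (sketch): at high speed maxpacket is 512, so with
	 * mtu 1500 and no function-driver header_len this asks for
	 * 14 (ethhdr) + 1500 + 20 (RX_EXTRA) = 1534 bytes, rounded up to
	 * the next maxpacket multiple: 1536.
	 */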

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
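
/*
 * Example (sketch): if the list already holds 4 requests and n is 10,
 * prealloc() allocates 6 more; if n is 2, the 2 surplus requests are
 * freed.  On allocation failure it still returns 0 when the list is
 * non-empty, so the link can run with a shallower queue.
 */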

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * And some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;
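
	/* Example (sketch): with qmult at its default of 5, only about one
	 * request in five (when tx_qlen % qmult == 0) asks for a completion
	 * IRQ at high speed; completions for the rest are processed when a
	 * later interrupt fires.
	 */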

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep, link->in);
			usb_ep_enable(link->out_ep, link->out);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}
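
/*
 * Example (sketch): both module parameters take twelve hex digits with
 * optional '.' or ':' separators before each byte, e.g.
 *
 *	modprobe g_ether dev_addr=aa:bb:cc:dd:ee:01 host_addr=aa:bb:cc:dd:ee:02
 *
 * An unparsable or invalid (multicast or all-zero) address falls back to
 * a random one, and get_ether_addr() returns 1 so the caller can warn.
 */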

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	return gether_setup_name(g, ethaddr, "usb");
}

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}
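
/*
 * Usage sketch (assumed caller, not part of this file): a composite
 * gadget driver typically does something like
 *
 *	static u8 host_mac[ETH_ALEN];
 *
 *	static int bind(struct usb_composite_dev *cdev)
 *	{
 *		int status = gether_setup(cdev->gadget, host_mac);
 *		if (status < 0)
 *			return status;
 *		... add configurations whose functions use host_mac ...
 *		return 0;
 *	}
 *
 * and calls gether_cleanup() from its unbind path.
 */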

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work_sync(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * holds an error code (negative errno), the connect succeeded and the
 * endpoints' driver_data values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
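
/*
 * Caller sketch (assumed, per the kernel-doc above; "geth" is a
 * hypothetical function-driver struct holding a struct gether):
 *
 *	net = gether_connect(&geth->port);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 */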

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}