Linux Kernel  3.7.1
u_ether.c
1 /*
2  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
3  *
4  * Copyright (C) 2003-2005,2008 David Brownell
5  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6  * Copyright (C) 2008 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 /* #define VERBOSE_DEBUG */
15 
16 #include <linux/kernel.h>
17 #include <linux/gfp.h>
18 #include <linux/device.h>
19 #include <linux/ctype.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 
23 #include "u_ether.h"
24 
25 
26 /*
27  * This component encapsulates the Ethernet link glue needed to provide
28  * one (!) network link through the USB gadget stack, normally "usb0".
29  *
30  * The control and data models are handled by the function driver which
31  * connects to this code; such as CDC Ethernet (ECM or EEM),
32  * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
33  * management.
34  *
35  * Link level addressing is handled by this component using module
36  * parameters; if no such parameters are provided, random link level
37  * addresses are used. Each end of the link uses one address. The
38  * host end address is exported in various ways, and is often recorded
39  * in configuration databases.
40  *
41  * The driver which assembles each configuration using such a link is
42  * responsible for ensuring that each configuration includes at most one
43  * instance of this network link. (The network layer provides ways for
44  * this single "physical" link to be used by multiple virtual links.)
45  */
46 
47 #define UETH__VERSION "29-May-2008"
48 
49 struct eth_dev {
50  /* lock is held while accessing port_usb
51  * or updating its backlink port_usb->ioport
52  */
53  spinlock_t lock;
54  struct gether *port_usb;
55 
56  struct net_device *net;
57  struct usb_gadget *gadget;
58 
59  spinlock_t req_lock; /* guard {rx,tx}_reqs */
60  struct list_head tx_reqs, rx_reqs;
61  atomic_t tx_qlen;
62 
63  struct sk_buff_head rx_frames;
64 
65  unsigned header_len;
66  struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
67  int (*unwrap)(struct gether *,
68  struct sk_buff *skb,
69  struct sk_buff_head *list);
70 
71  struct work_struct work;
72 
73  unsigned long todo;
74 #define WORK_RX_MEMORY 0
75 
76  bool zlp;
77  u8 host_mac[ETH_ALEN];
78 };
79 
80 /*-------------------------------------------------------------------------*/
81 
82 #define RX_EXTRA 20 /* bytes guarding against rx overflows */
83 
84 #define DEFAULT_QLEN 2 /* double buffering by default */
85 
86 static unsigned qmult = 5;
87 module_param(qmult, uint, S_IRUGO|S_IWUSR);
88 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
89 
90 /* for dual-speed hardware, use deeper queues at high/super speed */
91 static inline int qlen(struct usb_gadget *gadget)
92 {
93  if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
94  gadget->speed == USB_SPEED_SUPER))
95  return qmult * DEFAULT_QLEN;
96  else
97  return DEFAULT_QLEN;
98 }
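/* Example: with the default qmult of 5, a link running at high or super
 * speed keeps qmult * DEFAULT_QLEN = 10 requests queued per direction,
 * while a full-speed link stays at the double-buffered default of 2.
 */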
99 
100 /*-------------------------------------------------------------------------*/
101 
102 /* REVISIT there must be a better way than having two sets
103  * of debug calls ...
104  */
105 
106 #undef DBG
107 #undef VDBG
108 #undef ERROR
109 #undef INFO
110 
111 #define xprintk(d, level, fmt, args...) \
112  printk(level "%s: " fmt , (d)->net->name , ## args)
113 
114 #ifdef DEBUG
115 #undef DEBUG
116 #define DBG(dev, fmt, args...) \
117  xprintk(dev , KERN_DEBUG , fmt , ## args)
118 #else
119 #define DBG(dev, fmt, args...) \
120  do { } while (0)
121 #endif /* DEBUG */
122 
123 #ifdef VERBOSE_DEBUG
124 #define VDBG DBG
125 #else
126 #define VDBG(dev, fmt, args...) \
127  do { } while (0)
128 #endif /* VERBOSE_DEBUG */
129 
130 #define ERROR(dev, fmt, args...) \
131  xprintk(dev , KERN_ERR , fmt , ## args)
132 #define INFO(dev, fmt, args...) \
133  xprintk(dev , KERN_INFO , fmt , ## args)
134 
135 /*-------------------------------------------------------------------------*/
136 
137 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
138 
139 static int ueth_change_mtu(struct net_device *net, int new_mtu)
140 {
141  struct eth_dev *dev = netdev_priv(net);
142  unsigned long flags;
143  int status = 0;
144 
145  /* don't change MTU on "live" link (peer won't know) */
146  spin_lock_irqsave(&dev->lock, flags);
147  if (dev->port_usb)
148  status = -EBUSY;
149  else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
150  status = -ERANGE;
151  else
152  net->mtu = new_mtu;
153  spin_unlock_irqrestore(&dev->lock, flags);
154 
155  return status;
156 }
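/* Note: the MTU can only change while no USB configuration owns the link
 * (port_usb is NULL); accepted values lie in (ETH_HLEN, ETH_FRAME_LEN],
 * i.e. 15..1514 bytes for standard Ethernet framing.
 */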
157 
158 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
159 {
160  struct eth_dev *dev = netdev_priv(net);
161 
162  strlcpy(p->driver, "g_ether", sizeof p->driver);
163  strlcpy(p->version, UETH__VERSION, sizeof p->version);
164  strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
165  strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
166 }
167 
168 /* REVISIT can also support:
169  * - WOL (by tracking suspends and issuing remote wakeup)
170  * - msglevel (implies updated messaging)
171  * - ... probably more ethtool ops
172  */
173 
174 static const struct ethtool_ops ops = {
175  .get_drvinfo = eth_get_drvinfo,
176  .get_link = ethtool_op_get_link,
177 };
178 
179 static void defer_kevent(struct eth_dev *dev, int flag)
180 {
181  if (test_and_set_bit(flag, &dev->todo))
182  return;
183  if (!schedule_work(&dev->work))
184  ERROR(dev, "kevent %d may have been dropped\n", flag);
185  else
186  DBG(dev, "kevent %d scheduled\n", flag);
187 }
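/* defer_kevent() is how atomic-context paths (such as a failed rx skb
 * allocation) hand work to eth_work(), which can later refill the rx
 * queue using GFP_KERNEL allocations from process context.
 */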
188 
189 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
190 
191 static int
192 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
193 {
194  struct sk_buff *skb;
195  int retval = -ENOMEM;
196  size_t size = 0;
197  struct usb_ep *out;
198  unsigned long flags;
199 
200  spin_lock_irqsave(&dev->lock, flags);
201  if (dev->port_usb)
202  out = dev->port_usb->out_ep;
203  else
204  out = NULL;
205  spin_unlock_irqrestore(&dev->lock, flags);
206 
207  if (!out)
208  return -ENOTCONN;
209 
210 
211  /* Padding up to RX_EXTRA handles minor disagreements with host.
212  * Normally we use the USB "terminate on short read" convention;
213  * so allow up to (N*maxpacket), since that memory is normally
214  * already allocated. Some hardware doesn't deal well with short
215  * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
216  * byte off the end (to force hardware errors on overflow).
217  *
218  * RNDIS uses internal framing, and explicitly allows senders to
219  * pad to end-of-packet. That's potentially nice for speed, but
220  * means receivers can't recover lost synch on their own (because
221  * new packets don't only start after a short RX).
222  */
223  size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
224  size += dev->port_usb->header_len;
225  size += out->maxpacket - 1;
226  size -= size % out->maxpacket;
227 
228  if (dev->port_usb->is_fixed)
229  size = max_t(size_t, size, dev->port_usb->fixed_out_len);
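 /* Worked example: with a 1500 byte MTU, no function-specific header and
  * 512 byte bulk packets, size = 14 + 1500 + 20 = 1534, rounded up to
  * 1536 (3 * 512) so the request always ends on a packet boundary.
  */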
230 
231  skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
232  if (skb == NULL) {
233  DBG(dev, "no rx skb\n");
234  goto enomem;
235  }
236 
237  /* Some platforms perform better when IP packets are aligned,
238  * but on at least one, checksumming fails otherwise. Note:
239  * RNDIS headers involve variable numbers of LE32 values.
240  */
241  skb_reserve(skb, NET_IP_ALIGN);
242 
243  req->buf = skb->data;
244  req->length = size;
245  req->complete = rx_complete;
246  req->context = skb;
247 
248  retval = usb_ep_queue(out, req, gfp_flags);
249  if (retval == -ENOMEM)
250 enomem:
251  defer_kevent(dev, WORK_RX_MEMORY);
252  if (retval) {
253  DBG(dev, "rx submit --> %d\n", retval);
254  if (skb)
255  dev_kfree_skb_any(skb);
256  spin_lock_irqsave(&dev->req_lock, flags);
257  list_add(&req->list, &dev->rx_reqs);
258  spin_unlock_irqrestore(&dev->req_lock, flags);
259  }
260  return retval;
261 }
262 
263 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
264 {
265  struct sk_buff *skb = req->context, *skb2;
266  struct eth_dev *dev = ep->driver_data;
267  int status = req->status;
268 
269  switch (status) {
270 
271  /* normal completion */
272  case 0:
273  skb_put(skb, req->actual);
274 
275  if (dev->unwrap) {
276  unsigned long flags;
277 
278  spin_lock_irqsave(&dev->lock, flags);
279  if (dev->port_usb) {
280  status = dev->unwrap(dev->port_usb,
281  skb,
282  &dev->rx_frames);
283  } else {
284  dev_kfree_skb_any(skb);
285  status = -ENOTCONN;
286  }
287  spin_unlock_irqrestore(&dev->lock, flags);
288  } else {
289  skb_queue_tail(&dev->rx_frames, skb);
290  }
291  skb = NULL;
292 
293  skb2 = skb_dequeue(&dev->rx_frames);
294  while (skb2) {
295  if (status < 0
296  || ETH_HLEN > skb2->len
297  || skb2->len > ETH_FRAME_LEN) {
298  dev->net->stats.rx_errors++;
299  dev->net->stats.rx_length_errors++;
300  DBG(dev, "rx length %d\n", skb2->len);
301  dev_kfree_skb_any(skb2);
302  goto next_frame;
303  }
304  skb2->protocol = eth_type_trans(skb2, dev->net);
305  dev->net->stats.rx_packets++;
306  dev->net->stats.rx_bytes += skb2->len;
307 
308  /* no buffer copies needed, unless hardware can't
309  * use skb buffers.
310  */
311  status = netif_rx(skb2);
312 next_frame:
313  skb2 = skb_dequeue(&dev->rx_frames);
314  }
315  break;
316 
317  /* software-driven interface shutdown */
318  case -ECONNRESET: /* unlink */
319  case -ESHUTDOWN: /* disconnect etc */
320  VDBG(dev, "rx shutdown, code %d\n", status);
321  goto quiesce;
322 
323  /* for hardware automagic (such as pxa) */
324  case -ECONNABORTED: /* endpoint reset */
325  DBG(dev, "rx %s reset\n", ep->name);
326  defer_kevent(dev, WORK_RX_MEMORY);
327 quiesce:
328  dev_kfree_skb_any(skb);
329  goto clean;
330 
331  /* data overrun */
332  case -EOVERFLOW:
333  dev->net->stats.rx_over_errors++;
334  /* FALLTHROUGH */
335 
336  default:
337  dev->net->stats.rx_errors++;
338  DBG(dev, "rx status %d\n", status);
339  break;
340  }
341 
342  if (skb)
343  dev_kfree_skb_any(skb);
344  if (!netif_running(dev->net)) {
345 clean:
346  spin_lock(&dev->req_lock);
347  list_add(&req->list, &dev->rx_reqs);
348  spin_unlock(&dev->req_lock);
349  req = NULL;
350  }
351  if (req)
352  rx_submit(dev, req, GFP_ATOMIC);
353 }
354 
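/* prealloc() leaves "list" holding exactly n requests for "ep": missing
 * ones are allocated (stopping quietly at the first failure as long as
 * at least one request exists) and any surplus entries are freed.
 */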
355 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
356 {
357  unsigned i;
358  struct usb_request *req;
359 
360  if (!n)
361  return -ENOMEM;
362 
363  /* queue/recycle up to N requests */
364  i = n;
365  list_for_each_entry(req, list, list) {
366  if (i-- == 0)
367  goto extra;
368  }
369  while (i--) {
370  req = usb_ep_alloc_request(ep, GFP_ATOMIC);
371  if (!req)
372  return list_empty(list) ? -ENOMEM : 0;
373  list_add(&req->list, list);
374  }
375  return 0;
376 
377 extra:
378  /* free extras */
379  for (;;) {
380  struct list_head *next;
381 
382  next = req->list.next;
383  list_del(&req->list);
384  usb_ep_free_request(ep, req);
385 
386  if (next == list)
387  break;
388 
389  req = container_of(next, struct usb_request, list);
390  }
391  return 0;
392 }
393 
394 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
395 {
396  int status;
397 
398  spin_lock(&dev->req_lock);
399  status = prealloc(&dev->tx_reqs, link->in_ep, n);
400  if (status < 0)
401  goto fail;
402  status = prealloc(&dev->rx_reqs, link->out_ep, n);
403  if (status < 0)
404  goto fail;
405  goto done;
406 fail:
407  DBG(dev, "can't alloc requests\n");
408 done:
409  spin_unlock(&dev->req_lock);
410  return status;
411 }
412 
413 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
414 {
415  struct usb_request *req;
416  unsigned long flags;
417 
418  /* fill unused rxq slots with some skb */
419  spin_lock_irqsave(&dev->req_lock, flags);
420  while (!list_empty(&dev->rx_reqs)) {
421  req = container_of(dev->rx_reqs.next,
422  struct usb_request, list);
423  list_del_init(&req->list);
424  spin_unlock_irqrestore(&dev->req_lock, flags);
425 
426  if (rx_submit(dev, req, gfp_flags) < 0) {
427  defer_kevent(dev, WORK_RX_MEMORY);
428  return;
429  }
430 
431  spin_lock_irqsave(&dev->req_lock, flags);
432  }
433  spin_unlock_irqrestore(&dev->req_lock, flags);
434 }
435 
436 static void eth_work(struct work_struct *work)
437 {
438  struct eth_dev *dev = container_of(work, struct eth_dev, work);
439 
440  if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
441  if (netif_running(dev->net))
442  rx_fill(dev, GFP_KERNEL);
443  }
444 
445  if (dev->todo)
446  DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
447 }
448 
449 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
450 {
451  struct sk_buff *skb = req->context;
452  struct eth_dev *dev = ep->driver_data;
453 
454  switch (req->status) {
455  default:
456  dev->net->stats.tx_errors++;
457  VDBG(dev, "tx err %d\n", req->status);
458  /* FALLTHROUGH */
459  case -ECONNRESET: /* unlink */
460  case -ESHUTDOWN: /* disconnect etc */
461  break;
462  case 0:
463  dev->net->stats.tx_bytes += skb->len;
464  }
465  dev->net->stats.tx_packets++;
466 
467  spin_lock(&dev->req_lock);
468  list_add(&req->list, &dev->tx_reqs);
469  spin_unlock(&dev->req_lock);
470  dev_kfree_skb_any(skb);
471 
472  atomic_dec(&dev->tx_qlen);
473  if (netif_carrier_ok(dev->net))
474  netif_wake_queue(dev->net);
475 }
476 
477 static inline int is_promisc(u16 cdc_filter)
478 {
479  return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
480 }
481 
482 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
483  struct net_device *net)
484 {
485  struct eth_dev *dev = netdev_priv(net);
486  int length = skb->len;
487  int retval;
488  struct usb_request *req = NULL;
489  unsigned long flags;
490  struct usb_ep *in;
491  u16 cdc_filter;
492 
493  spin_lock_irqsave(&dev->lock, flags);
494  if (dev->port_usb) {
495  in = dev->port_usb->in_ep;
496  cdc_filter = dev->port_usb->cdc_filter;
497  } else {
498  in = NULL;
499  cdc_filter = 0;
500  }
501  spin_unlock_irqrestore(&dev->lock, flags);
502 
503  if (!in) {
504  dev_kfree_skb_any(skb);
505  return NETDEV_TX_OK;
506  }
507 
508  /* apply outgoing CDC or RNDIS filters */
509  if (!is_promisc(cdc_filter)) {
510  u8 *dest = skb->data;
511 
512  if (is_multicast_ether_addr(dest)) {
513  u16 type;
514 
515  /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
516  * SET_ETHERNET_MULTICAST_FILTERS requests
517  */
518  if (is_broadcast_ether_addr(dest))
519  type = USB_CDC_PACKET_TYPE_BROADCAST;
520  else
521  type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
522  if (!(cdc_filter & type)) {
523  dev_kfree_skb_any(skb);
524  return NETDEV_TX_OK;
525  }
526  }
527  /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
528  }
529 
530  spin_lock_irqsave(&dev->req_lock, flags);
531  /*
532  * this freelist can be empty if an interrupt triggered disconnect()
533  * and reconfigured the gadget (shutting down this queue) after the
534  * network stack decided to xmit but before we got the spinlock.
535  */
536  if (list_empty(&dev->tx_reqs)) {
537  spin_unlock_irqrestore(&dev->req_lock, flags);
538  return NETDEV_TX_BUSY;
539  }
540 
541  req = container_of(dev->tx_reqs.next, struct usb_request, list);
542  list_del(&req->list);
543 
544  /* temporarily stop TX queue when the freelist empties */
545  if (list_empty(&dev->tx_reqs))
546  netif_stop_queue(net);
547  spin_unlock_irqrestore(&dev->req_lock, flags);
548 
549  /* no buffer copies needed, unless the network stack did it
550  * or the hardware can't use skb buffers.
551  * or there's not enough space for extra headers we need
552  */
553  if (dev->wrap) {
554  unsigned long flags;
555 
556  spin_lock_irqsave(&dev->lock, flags);
557  if (dev->port_usb)
558  skb = dev->wrap(dev->port_usb, skb);
559  spin_unlock_irqrestore(&dev->lock, flags);
560  if (!skb)
561  goto drop;
562 
563  length = skb->len;
564  }
565  req->buf = skb->data;
566  req->context = skb;
567  req->complete = tx_complete;
568 
569  /* NCM requires no zlp if transfer is dwNtbInMaxSize */
570  if (dev->port_usb->is_fixed &&
571  length == dev->port_usb->fixed_in_len &&
572  (length % in->maxpacket) == 0)
573  req->zero = 0;
574  else
575  req->zero = 1;
576 
577  /* use zlp framing on tx for strict CDC-Ether conformance,
578  * though any robust network rx path ignores extra padding.
579  * and some hardware doesn't like to write zlps.
580  */
581  if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
582  length++;
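 /* Example: a 512 byte frame on a 512 byte bulk-in endpoint goes out as
  * 513 bytes, so the transfer ends with a short packet and no ZLP is
  * required from hardware that can't generate one; the extra pad byte
  * is ignored by any robust receive path (see comment above).
  */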
583 
584  req->length = length;
585 
586  /* throttle high/super speed IRQ rate back slightly */
587  if (gadget_is_dualspeed(dev->gadget))
588  req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
589  dev->gadget->speed == USB_SPEED_SUPER)
590  ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
591  : 0;
592 
593  retval = usb_ep_queue(in, req, GFP_ATOMIC);
594  switch (retval) {
595  default:
596  DBG(dev, "tx queue err %d\n", retval);
597  break;
598  case 0:
599  net->trans_start = jiffies;
600  atomic_inc(&dev->tx_qlen);
601  }
602 
603  if (retval) {
604  dev_kfree_skb_any(skb);
605 drop:
606  dev->net->stats.tx_dropped++;
607  spin_lock_irqsave(&dev->req_lock, flags);
608  if (list_empty(&dev->tx_reqs))
609  netif_start_queue(net);
610  list_add(&req->list, &dev->tx_reqs);
611  spin_unlock_irqrestore(&dev->req_lock, flags);
612  }
613  return NETDEV_TX_OK;
614 }
615 
616 /*-------------------------------------------------------------------------*/
617 
618 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
619 {
620  DBG(dev, "%s\n", __func__);
621 
622  /* fill the rx queue */
623  rx_fill(dev, gfp_flags);
624 
625  /* and open the tx floodgates */
626  atomic_set(&dev->tx_qlen, 0);
627  netif_wake_queue(dev->net);
628 }
629 
630 static int eth_open(struct net_device *net)
631 {
632  struct eth_dev *dev = netdev_priv(net);
633  struct gether *link;
634 
635  DBG(dev, "%s\n", __func__);
636  if (netif_carrier_ok(dev->net))
637  eth_start(dev, GFP_KERNEL);
638 
639  spin_lock_irq(&dev->lock);
640  link = dev->port_usb;
641  if (link && link->open)
642  link->open(link);
643  spin_unlock_irq(&dev->lock);
644 
645  return 0;
646 }
647 
648 static int eth_stop(struct net_device *net)
649 {
650  struct eth_dev *dev = netdev_priv(net);
651  unsigned long flags;
652 
653  VDBG(dev, "%s\n", __func__);
654  netif_stop_queue(net);
655 
656  DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
657  dev->net->stats.rx_packets, dev->net->stats.tx_packets,
658  dev->net->stats.rx_errors, dev->net->stats.tx_errors
659  );
660 
661  /* ensure there are no more active requests */
662  spin_lock_irqsave(&dev->lock, flags);
663  if (dev->port_usb) {
664  struct gether *link = dev->port_usb;
665 
666  if (link->close)
667  link->close(link);
668 
669  /* NOTE: we have no abort-queue primitive we could use
670  * to cancel all pending I/O. Instead, we disable then
671  * reenable the endpoints ... this idiom may leave toggle
672  * wrong, but that's a self-correcting error.
673  *
674  * REVISIT: we *COULD* just let the transfers complete at
675  * their own pace; the network stack can handle old packets.
676  * For the moment we leave this here, since it works.
677  */
678  usb_ep_disable(link->in_ep);
679  usb_ep_disable(link->out_ep);
680  if (netif_carrier_ok(net)) {
681  DBG(dev, "host still using in/out endpoints\n");
682  usb_ep_enable(link->in_ep);
683  usb_ep_enable(link->out_ep);
684  }
685  }
686  spin_unlock_irqrestore(&dev->lock, flags);
687 
688  return 0;
689 }
690 
691 /*-------------------------------------------------------------------------*/
692 
693 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
694 static char *dev_addr;
695 module_param(dev_addr, charp, S_IRUGO);
696 MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
697 
698 /* this address is invisible to ifconfig */
699 static char *host_addr;
700 module_param(host_addr, charp, S_IRUGO);
701 MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
702 
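/* Addresses are given as twelve hex digits, optionally separated by ':'
 * or '.', e.g. "aa:bb:cc:dd:ee:01". When this code is built into a
 * gadget driver such as g_ether, that typically looks like
 * "modprobe g_ether dev_addr=aa:bb:cc:dd:ee:01 host_addr=aa:bb:cc:dd:ee:02"
 * (an illustrative invocation, not taken from this file). Any address
 * accepted by is_valid_ether_addr() is used as given; anything else
 * falls back to a random address, and the caller is told so it can warn.
 */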
703 static int get_ether_addr(const char *str, u8 *dev_addr)
704 {
705  if (str) {
706  unsigned i;
707 
708  for (i = 0; i < 6; i++) {
709  unsigned char num;
710 
711  if ((*str == '.') || (*str == ':'))
712  str++;
713  num = hex_to_bin(*str++) << 4;
714  num |= hex_to_bin(*str++);
715  dev_addr [i] = num;
716  }
717  if (is_valid_ether_addr(dev_addr))
718  return 0;
719  }
720  eth_random_addr(dev_addr);
721  return 1;
722 }
723 
724 static struct eth_dev *the_dev;
725 
726 static const struct net_device_ops eth_netdev_ops = {
727  .ndo_open = eth_open,
728  .ndo_stop = eth_stop,
729  .ndo_start_xmit = eth_start_xmit,
730  .ndo_change_mtu = ueth_change_mtu,
731  .ndo_set_mac_address = eth_mac_addr,
732  .ndo_validate_addr = eth_validate_addr,
733 };
734 
735 static struct device_type gadget_type = {
736  .name = "gadget",
737 };
738 
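/* gether_setup_name() creates and registers the single "%d"-style
 * network device (e.g. netname "usb" becomes usb0), assigns device and
 * host MAC addresses from the module parameters (or randomly), and
 * copies the host-side address into ethaddr when that buffer is
 * non-NULL so the function driver can expose it in its descriptors.
 * Returns zero on success or a negative errno; only one link may be
 * set up at a time (the_dev enforces this with -EBUSY).
 */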
753 int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
754  const char *netname)
755 {
756  struct eth_dev *dev;
757  struct net_device *net;
758  int status;
759 
760  if (the_dev)
761  return -EBUSY;
762 
763  net = alloc_etherdev(sizeof *dev);
764  if (!net)
765  return -ENOMEM;
766 
767  dev = netdev_priv(net);
768  spin_lock_init(&dev->lock);
769  spin_lock_init(&dev->req_lock);
770  INIT_WORK(&dev->work, eth_work);
771  INIT_LIST_HEAD(&dev->tx_reqs);
772  INIT_LIST_HEAD(&dev->rx_reqs);
773 
774  skb_queue_head_init(&dev->rx_frames);
775 
776  /* network device setup */
777  dev->net = net;
778  snprintf(net->name, sizeof(net->name), "%s%%d", netname);
779 
780  if (get_ether_addr(dev_addr, net->dev_addr))
781  dev_warn(&g->dev,
782  "using random %s ethernet address\n", "self");
783  if (get_ether_addr(host_addr, dev->host_mac))
784  dev_warn(&g->dev,
785  "using random %s ethernet address\n", "host");
786 
787  if (ethaddr)
788  memcpy(ethaddr, dev->host_mac, ETH_ALEN);
789 
790  net->netdev_ops = &eth_netdev_ops;
791 
792  SET_ETHTOOL_OPS(net, &ops);
793 
794  dev->gadget = g;
795  SET_NETDEV_DEV(net, &g->dev);
796  SET_NETDEV_DEVTYPE(net, &gadget_type);
797 
798  status = register_netdev(net);
799  if (status < 0) {
800  dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
801  free_netdev(net);
802  } else {
803  INFO(dev, "MAC %pM\n", net->dev_addr);
804  INFO(dev, "HOST MAC %pM\n", dev->host_mac);
805 
806  the_dev = dev;
807 
808  /* two kinds of host-initiated state changes:
809  * - iff DATA transfer is active, carrier is "on"
810  * - tx queueing enabled if open *and* carrier is "on"
811  */
812  netif_carrier_off(net);
813  }
814 
815  return status;
816 }
817 
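/* gether_cleanup() undoes gether_setup_name(): it unregisters the
 * network device, flushes any pending eth_work(), and frees the netdev.
 * Calling it when no link was ever set up is a safe no-op.
 */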
824 void gether_cleanup(void)
825 {
826  if (!the_dev)
827  return;
828 
829  unregister_netdev(the_dev->net);
830  flush_work(&the_dev->work);
831  free_netdev(the_dev->net);
832 
833  the_dev = NULL;
834 }
835 
836 
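/* gether_connect() is called by the function driver once the host has
 * activated its data interface: it enables the endpoints the driver
 * chose for the link, preallocates the request queues, records the
 * link's wrap/unwrap framing hooks, and turns the carrier on so the tx
 * queue can run. It returns the net_device on success or an ERR_PTR()
 * value; on failure the endpoints it enabled are disabled again, but
 * any other cleanup is left to the caller.
 */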
853 struct net_device *gether_connect(struct gether *link)
854 {
855  struct eth_dev *dev = the_dev;
856  int result = 0;
857 
858  if (!dev)
859  return ERR_PTR(-EINVAL);
860 
861  link->in_ep->driver_data = dev;
862  result = usb_ep_enable(link->in_ep);
863  if (result != 0) {
864  DBG(dev, "enable %s --> %d\n",
865  link->in_ep->name, result);
866  goto fail0;
867  }
868 
869  link->out_ep->driver_data = dev;
870  result = usb_ep_enable(link->out_ep);
871  if (result != 0) {
872  DBG(dev, "enable %s --> %d\n",
873  link->out_ep->name, result);
874  goto fail1;
875  }
876 
877  if (result == 0)
878  result = alloc_requests(dev, link, qlen(dev->gadget));
879 
880  if (result == 0) {
881  dev->zlp = link->is_zlp_ok;
882  DBG(dev, "qlen %d\n", qlen(dev->gadget));
883 
884  dev->header_len = link->header_len;
885  dev->unwrap = link->unwrap;
886  dev->wrap = link->wrap;
887 
888  spin_lock(&dev->lock);
889  dev->port_usb = link;
890  link->ioport = dev;
891  if (netif_running(dev->net)) {
892  if (link->open)
893  link->open(link);
894  } else {
895  if (link->close)
896  link->close(link);
897  }
898  spin_unlock(&dev->lock);
899 
900  netif_carrier_on(dev->net);
901  if (netif_running(dev->net))
902  eth_start(dev, GFP_ATOMIC);
903 
904  /* on error, disable any endpoints */
905  } else {
906  (void) usb_ep_disable(link->out_ep);
907 fail1:
908  (void) usb_ep_disable(link->in_ep);
909  }
910 fail0:
911  /* caller is responsible for cleanup on error */
912  if (result < 0)
913  return ERR_PTR(result);
914  return dev->net;
915 }
916 
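/* gether_disconnect() is the inverse of gether_connect(): it stops the
 * tx queue, drops the carrier, disables both endpoints (forcing
 * completion of any requests still in flight), frees every request on
 * the tx and rx lists, and finally breaks the port_usb/ioport links so
 * the network device survives for a later connect.
 */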
929 void gether_disconnect(struct gether *link)
930 {
931  struct eth_dev *dev = link->ioport;
932  struct usb_request *req;
933 
934  WARN_ON(!dev);
935  if (!dev)
936  return;
937 
938  DBG(dev, "%s\n", __func__);
939 
940  netif_stop_queue(dev->net);
941  netif_carrier_off(dev->net);
942 
943  /* disable endpoints, forcing (synchronous) completion
944  * of all pending i/o. then free the request objects
945  * and forget about the endpoints.
946  */
947  usb_ep_disable(link->in_ep);
948  spin_lock(&dev->req_lock);
949  while (!list_empty(&dev->tx_reqs)) {
950  req = container_of(dev->tx_reqs.next,
951  struct usb_request, list);
952  list_del(&req->list);
953 
954  spin_unlock(&dev->req_lock);
955  usb_ep_free_request(link->in_ep, req);
956  spin_lock(&dev->req_lock);
957  }
958  spin_unlock(&dev->req_lock);
959  link->in_ep->driver_data = NULL;
960  link->in_ep->desc = NULL;
961 
962  usb_ep_disable(link->out_ep);
963  spin_lock(&dev->req_lock);
964  while (!list_empty(&dev->rx_reqs)) {
965  req = container_of(dev->rx_reqs.next,
966  struct usb_request, list);
967  list_del(&req->list);
968 
969  spin_unlock(&dev->req_lock);
970  usb_ep_free_request(link->out_ep, req);
971  spin_lock(&dev->req_lock);
972  }
973  spin_unlock(&dev->req_lock);
974  link->out_ep->driver_data = NULL;
975  link->out_ep->desc = NULL;
976 
977  /* finish forgetting about this USB link episode */
978  dev->header_len = 0;
979  dev->unwrap = NULL;
980  dev->wrap = NULL;
981 
982  spin_lock(&dev->lock);
983  dev->port_usb = NULL;
984  link->ioport = NULL;
985  spin_unlock(&dev->lock);
986 }