16 #include <linux/kernel.h>
17 #include <linux/module.h>
19 #include <linux/device.h>
20 #include <linux/ctype.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
/* Driver version tag (date-stamped string). */
49 #define UETH__VERSION "29-May-2008"
/* Bit index into dev->todo marking deferred rx-buffer allocation work
 * (scheduled via defer_kevent(), reported in the eth_work "flags" DBG). */
76 #define WORK_RX_MEMORY 0
/* Base request-queue depth; presumably scaled by qmult at higher bus
 * speeds inside qlen() — body not visible here, confirm. */
86 #define DEFAULT_QLEN 2
/* Queue-length multiplier; NOTE(review): likely exposed as a module
 * parameter in the full file — the module_param line is not visible. */
88 static unsigned qmult = 5;
93 static inline int qlen(
struct usb_gadget *gadget)
/* Core logging helper: printk at the given level, prefixing every
 * message with this device's network-interface name. */
113 #define xprintk(d, level, fmt, args...) \
114 printk(level "%s: " fmt , (d)->net->name , ## args)
/* Debug-level message via xprintk.  NOTE(review): DBG is defined a
 * second time a few lines below in this extraction; the surrounding
 * #ifdef DEBUG / #else guards appear to have been elided — confirm
 * against the full file. */
118 #define DBG(dev, fmt, args...) \
119 xprintk(dev , KERN_DEBUG , fmt , ## args)
121 #define DBG(dev, fmt, args...) \
128 #define VDBG(dev, fmt, args...) \
/* Error-level message via xprintk. */
132 #define ERROR(dev, fmt, args...) \
133 xprintk(dev , KERN_ERR , fmt , ## args)
/* Info-level message via xprintk. */
134 #define INFO(dev, fmt, args...) \
135 xprintk(dev , KERN_INFO , fmt , ## args)
141 static int ueth_change_mtu(
struct net_device *
net,
int new_mtu)
155 spin_unlock_irqrestore(&dev->
lock, flags);
177 .get_drvinfo = eth_get_drvinfo,
181 static void defer_kevent(
struct eth_dev *dev,
int flag)
186 ERROR(dev,
"kevent %d may have been dropped\n", flag);
188 DBG(dev,
"kevent %d scheduled\n", flag);
207 spin_unlock_irqrestore(&dev->
lock, flags);
235 DBG(dev,
"no rx skb\n");
250 retval = usb_ep_queue(out, req, gfp_flags);
255 DBG(dev,
"rx submit --> %d\n", retval);
260 spin_unlock_irqrestore(&dev->
req_lock, flags);
289 spin_unlock_irqrestore(&dev->
lock, flags);
300 dev->
net->stats.rx_errors++;
301 dev->
net->stats.rx_length_errors++;
302 DBG(dev,
"rx length %d\n", skb2->len);
307 dev->
net->stats.rx_packets++;
308 dev->
net->stats.rx_bytes += skb2->len;
322 VDBG(dev,
"rx shutdown, code %d\n", status);
327 DBG(dev,
"rx %s reset\n", ep->
name);
335 dev->
net->stats.rx_over_errors++;
339 dev->
net->stats.rx_errors++;
340 DBG(dev,
"rx status %d\n", status);
346 if (!netif_running(dev->
net)) {
374 return list_empty(list) ? -
ENOMEM : 0;
375 list_add(&req->
list, list);
384 next = req->
list.next;
386 usb_ep_free_request(ep, req);
401 status = prealloc(&dev->tx_reqs, link->
in_ep, n);
409 DBG(dev,
"can't alloc requests\n");
415 static void rx_fill(
struct eth_dev *dev,
gfp_t gfp_flags)
422 while (!list_empty(&dev->
rx_reqs)) {
425 list_del_init(&req->
list);
426 spin_unlock_irqrestore(&dev->
req_lock, flags);
428 if (rx_submit(dev, req, gfp_flags) < 0) {
435 spin_unlock_irqrestore(&dev->
req_lock, flags);
443 if (netif_running(dev->
net))
448 DBG(dev,
"work done, flags = 0x%lx\n", dev->
todo);
458 dev->
net->stats.tx_errors++;
465 dev->
net->stats.tx_bytes += skb->
len;
467 dev->
net->stats.tx_packets++;
470 list_add(&req->
list, &dev->tx_reqs);
475 if (netif_carrier_ok(dev->
net))
476 netif_wake_queue(dev->
net);
479 static inline int is_promisc(
u16 cdc_filter)
487 struct eth_dev *dev = netdev_priv(net);
498 cdc_filter = dev->
port_usb->cdc_filter;
503 spin_unlock_irqrestore(&dev->
lock, flags);
511 if (!is_promisc(cdc_filter)) {
514 if (is_multicast_ether_addr(dest)) {
520 if (is_broadcast_ether_addr(dest))
524 if (!(cdc_filter & type)) {
538 if (list_empty(&dev->tx_reqs)) {
539 spin_unlock_irqrestore(&dev->
req_lock, flags);
547 if (list_empty(&dev->tx_reqs))
548 netif_stop_queue(net);
549 spin_unlock_irqrestore(&dev->
req_lock, flags);
561 spin_unlock_irqrestore(&dev->
lock, flags);
573 length == dev->
port_usb->fixed_in_len &&
589 if (gadget_is_dualspeed(dev->
gadget))
598 DBG(dev,
"tx queue err %d\n", retval);
608 dev->
net->stats.tx_dropped++;
610 if (list_empty(&dev->tx_reqs))
611 netif_start_queue(net);
612 list_add(&req->
list, &dev->tx_reqs);
613 spin_unlock_irqrestore(&dev->
req_lock, flags);
620 static void eth_start(
struct eth_dev *dev,
gfp_t gfp_flags)
622 DBG(dev,
"%s\n", __func__);
625 rx_fill(dev, gfp_flags);
629 netif_wake_queue(dev->
net);
634 struct eth_dev *dev = netdev_priv(net);
637 DBG(dev,
"%s\n", __func__);
638 if (netif_carrier_ok(dev->
net))
641 spin_lock_irq(&dev->
lock);
643 if (link && link->
open)
645 spin_unlock_irq(&dev->
lock);
652 struct eth_dev *dev = netdev_priv(net);
655 VDBG(dev,
"%s\n", __func__);
656 netif_stop_queue(net);
658 DBG(dev,
"stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
659 dev->
net->stats.rx_packets, dev->
net->stats.tx_packets,
660 dev->
net->stats.rx_errors, dev->
net->stats.tx_errors
682 in = link->
in_ep->desc;
684 usb_ep_disable(link->
in_ep);
685 usb_ep_disable(link->
out_ep);
686 if (netif_carrier_ok(net)) {
687 DBG(dev,
"host still using in/out endpoints\n");
690 usb_ep_enable(link->
in_ep);
691 usb_ep_enable(link->
out_ep);
694 spin_unlock_irqrestore(&dev->
lock, flags);
/* Textual MAC addresses for the gadget ("self") and the USB host side;
 * parsed by get_ether_addr(), which falls back to a random address when
 * unset or invalid.  NOTE(review): presumably module parameters — the
 * module_param lines are not visible in this view, confirm. */
702 static char *dev_addr;
707 static char *host_addr;
711 static int get_ether_addr(
const char *
str,
u8 *dev_addr)
716 for (i = 0; i < 6; i++) {
719 if ((*str ==
'.') || (*str ==
':'))
725 if (is_valid_ether_addr(dev_addr))
728 eth_random_addr(dev_addr);
732 static struct eth_dev *the_dev;
735 .ndo_open = eth_open,
736 .ndo_stop = eth_stop,
737 .ndo_start_xmit = eth_start_xmit,
738 .ndo_change_mtu = ueth_change_mtu,
771 net = alloc_etherdev(
sizeof *dev);
775 dev = netdev_priv(net);
779 INIT_LIST_HEAD(&dev->tx_reqs);
788 if (get_ether_addr(dev_addr, net->
dev_addr))
790 "using random %s ethernet address\n",
"self");
791 if (get_ether_addr(host_addr, dev->
host_mac))
793 "using random %s ethernet address\n",
"host");
808 dev_dbg(&g->
dev,
"register_netdev failed, %d\n", status);
870 result = usb_ep_enable(link->
in_ep);
872 DBG(dev,
"enable %s --> %d\n",
873 link->
in_ep->name, result);
878 result = usb_ep_enable(link->
out_ep);
880 DBG(dev,
"enable %s --> %d\n",
881 link->
out_ep->name, result);
886 result = alloc_requests(dev, link, qlen(dev->
gadget));
890 DBG(dev,
"qlen %d\n", qlen(dev->
gadget));
896 spin_lock(&dev->
lock);
899 if (netif_running(dev->
net)) {
906 spin_unlock(&dev->
lock);
909 if (netif_running(dev->
net))
921 return ERR_PTR(result);
946 DBG(dev,
"%s\n", __func__);
948 netif_stop_queue(dev->
net);
955 usb_ep_disable(link->
in_ep);
957 while (!list_empty(&dev->tx_reqs)) {
963 usb_ep_free_request(link->
in_ep, req);
970 usb_ep_disable(link->
out_ep);
972 while (!list_empty(&dev->
rx_reqs)) {
978 usb_ep_free_request(link->
out_ep, req);
990 spin_lock(&dev->
lock);
993 spin_unlock(&dev->
lock);