16 #include <linux/kernel.h>
18 #include <linux/device.h>
19 #include <linux/ctype.h>
21 #include <linux/ethtool.h>
/* NOTE(review): this excerpt is a non-contiguous sampling of the kernel's
 * gadget-Ethernet link layer; the leading number on each line is the original
 * file's line number, and gaps in those numbers mark elided source. */
/* driver version string */
47 #define UETH__VERSION "29-May-2008"
/* deferred-work flag bit -- presumably "retry rx allocation later"; confirm
 * against the elided eth_work()/defer_kevent() bodies */
74 #define WORK_RX_MEMORY 0
/* base request-queue depth */
84 #define DEFAULT_QLEN 2
/* module parameter: queue-length multiplier (default 5); likely scales
 * DEFAULT_QLEN at high speed -- TODO confirm in the elided qlen() body */
86 static unsigned qmult = 5;
/* qlen(): per-gadget request queue depth -- body elided in this excerpt */
91 static inline int qlen(
struct usb_gadget *gadget)
/* common log helper: prefixes every message with the net device name */
111 #define xprintk(d, level, fmt, args...) \
112 printk(level "%s: " fmt , (d)->net->name , ## args)
116 #define DBG(dev, fmt, args...) \
117 xprintk(dev , KERN_DEBUG , fmt , ## args)
/* NOTE(review): the second DBG definition below (orig. line 119) and the
 * body-less VDBG (orig. line 126) are almost certainly the no-op variants of
 * elided #ifdef DEBUG / #ifdef VERBOSE_DEBUG blocks (orig. lines 113-129 are
 * missing from this excerpt) -- not an actual macro redefinition bug. */
119 #define DBG(dev, fmt, args...) \
126 #define VDBG(dev, fmt, args...) \
130 #define ERROR(dev, fmt, args...) \
131 xprintk(dev , KERN_ERR , fmt , ## args)
132 #define INFO(dev, fmt, args...) \
133 xprintk(dev , KERN_INFO , fmt , ## args)
/* ueth_change_mtu(): net_device_ops MTU-change hook (wired up at orig. line
 * 730). Body elided; visibly takes dev->lock (unlock at orig. line 153),
 * presumably to validate new_mtu against the USB link -- confirm. */
139 static int ueth_change_mtu(
struct net_device *
net,
int new_mtu)
153 spin_unlock_irqrestore(&dev->
lock, flags);
/* fragment of a struct ethtool_ops initializer (braces elided) */
175 .get_drvinfo = eth_get_drvinfo,
/* defer_kevent(): schedule deferred work for `flag` (e.g. WORK_RX_MEMORY).
 * Body elided; the ERROR path logs when the event could not be queued
 * (already pending, presumably), the DBG path logs a successful schedule. */
179 static void defer_kevent(
struct eth_dev *dev,
int flag)
184 ERROR(dev,
"kevent %d may have been dropped\n", flag);
186 DBG(dev,
"kevent %d scheduled\n", flag);
/* fragments of rx_submit() (orig. lines ~190-260): allocates an rx skb
 * ("no rx skb" is the allocation-failure log), queues the usb_request on the
 * OUT endpoint, and on usb_ep_queue() failure logs and requeues the request
 * under dev->req_lock. Most of the body is elided. */
205 spin_unlock_irqrestore(&dev->
lock, flags);
233 DBG(dev,
"no rx skb\n");
248 retval = usb_ep_queue(out, req, gfp_flags);
253 DBG(dev,
"rx submit --> %d\n", retval);
258 spin_unlock_irqrestore(&dev->
req_lock, flags);
/* fragments of the rx completion handler (orig. lines ~265-350): on success
 * it validates frame length, bumps rx_packets/rx_bytes and hands the skb to
 * the stack; on error it classifies the status (shutdown, endpoint reset,
 * overrun, other) into the matching net stats counters. Control flow between
 * these lines is elided -- the switch/goto structure cannot be seen here. */
287 spin_unlock_irqrestore(&dev->
lock, flags);
298 dev->
net->stats.rx_errors++;
299 dev->
net->stats.rx_length_errors++;
300 DBG(dev,
"rx length %d\n", skb2->len);
305 dev->
net->stats.rx_packets++;
306 dev->
net->stats.rx_bytes += skb2->len;
320 VDBG(dev,
"rx shutdown, code %d\n", status);
325 DBG(dev,
"rx %s reset\n", ep->
name);
333 dev->
net->stats.rx_over_errors++;
337 dev->
net->stats.rx_errors++;
338 DBG(dev,
"rx status %d\n", status);
344 if (!netif_running(dev->
net)) {
/* fragments of prealloc() (orig. lines ~360-390): grows/shrinks a list of
 * usb_requests for an endpoint; returns -ENOMEM only if the list ended up
 * empty, and frees surplus requests in the trimming loop. Loop structure
 * elided. */
372 return list_empty(list) ? -
ENOMEM : 0;
373 list_add(&req->
list, list);
382 next = req->
list.next;
384 usb_ep_free_request(ep, req);
/* fragments of alloc_requests(): preallocates n tx (and, elided here,
 * presumably rx) requests via prealloc(); logs on failure. */
399 status = prealloc(&dev->tx_reqs, link->
in_ep, n);
407 DBG(dev,
"can't alloc requests\n");
/* rx_fill(): drains dev->rx_reqs, submitting each free request to the OUT
 * endpoint via rx_submit(); dev->req_lock protects the list and is dropped
 * around each submit. Error/early-exit paths elided. */
413 static void rx_fill(
struct eth_dev *dev,
gfp_t gfp_flags)
420 while (!list_empty(&dev->
rx_reqs)) {
423 list_del_init(&req->
list);
424 spin_unlock_irqrestore(&dev->
req_lock, flags);
426 if (rx_submit(dev, req, gfp_flags) < 0) {
433 spin_unlock_irqrestore(&dev->
req_lock, flags);
/* fragments of the deferred-work handler (eth_work, orig. lines ~439-448):
 * refills the rx queue only while the interface is running, then logs any
 * dev->todo flags still pending. */
441 if (netif_running(dev->
net))
446 DBG(dev,
"work done, flags = 0x%lx\n", dev->
todo);
/* fragments of the tx completion handler (orig. lines ~450-475): counts
 * errors or packets/bytes depending on req->status (branching elided),
 * returns the request to dev->tx_reqs, and wakes the tx queue if the
 * carrier is still up. */
456 dev->
net->stats.tx_errors++;
463 dev->
net->stats.tx_bytes += skb->
len;
465 dev->
net->stats.tx_packets++;
468 list_add(&req->
list, &dev->tx_reqs);
473 if (netif_carrier_ok(dev->
net))
474 netif_wake_queue(dev->
net);
/* is_promisc(): body elided; presumably tests the CDC promiscuous bit in
 * the host's packet filter -- confirm against orig. line ~479. */
477 static inline int is_promisc(
u16 cdc_filter)
/* fragments of eth_start_xmit() (orig. lines ~483-616), the
 * ndo_start_xmit hook: snapshots the host's CDC packet filter under
 * dev->lock, drops multicast/broadcast/directed frames the host filter
 * excludes, takes a free tx request from dev->tx_reqs under dev->req_lock
 * (stopping the queue when the last one is used), queues it on the IN
 * endpoint, and on failure counts tx_dropped and returns the request.
 * The fixed_in_len comparison suggests zero-length-packet avoidance for
 * fixed-size links -- confirm; most control flow is elided. */
485 struct eth_dev *dev = netdev_priv(net);
496 cdc_filter = dev->
port_usb->cdc_filter;
501 spin_unlock_irqrestore(&dev->
lock, flags);
509 if (!is_promisc(cdc_filter)) {
512 if (is_multicast_ether_addr(dest)) {
518 if (is_broadcast_ether_addr(dest))
522 if (!(cdc_filter & type)) {
536 if (list_empty(&dev->tx_reqs)) {
537 spin_unlock_irqrestore(&dev->
req_lock, flags);
545 if (list_empty(&dev->tx_reqs))
546 netif_stop_queue(net);
547 spin_unlock_irqrestore(&dev->
req_lock, flags);
559 spin_unlock_irqrestore(&dev->
lock, flags);
571 length == dev->
port_usb->fixed_in_len &&
587 if (gadget_is_dualspeed(dev->
gadget))
596 DBG(dev,
"tx queue err %d\n", retval);
606 dev->
net->stats.tx_dropped++;
608 if (list_empty(&dev->tx_reqs))
609 netif_start_queue(net);
610 list_add(&req->
list, &dev->tx_reqs);
611 spin_unlock_irqrestore(&dev->
req_lock, flags);
/* eth_start(): prime the link -- fill the rx request queue and wake the
 * netdev tx queue. Lines between are elided. */
618 static void eth_start(
struct eth_dev *dev,
gfp_t gfp_flags)
620 DBG(dev,
"%s\n", __func__);
623 rx_fill(dev, gfp_flags);
627 netif_wake_queue(dev->
net);
/* fragments of eth_open() (ndo_open, orig. lines ~630-646): starts the
 * link when the carrier is already up, then under dev->lock notifies the
 * USB-side port via its open callback if one is attached. */
632 struct eth_dev *dev = netdev_priv(net);
635 DBG(dev,
"%s\n", __func__);
636 if (netif_carrier_ok(dev->
net))
639 spin_lock_irq(&dev->
lock);
641 if (link && link->
open)
643 spin_unlock_irq(&dev->
lock);
/* fragments of eth_stop() (ndo_stop, orig. lines ~648-690): stops the tx
 * queue, logs final rx/tx statistics, and quiesces the USB link by
 * disabling both endpoints (which flushes in-flight requests); if the host
 * side is still connected (carrier ok) the endpoints are immediately
 * re-enabled so the session survives. Locking context elided. */
650 struct eth_dev *dev = netdev_priv(net);
653 VDBG(dev,
"%s\n", __func__);
654 netif_stop_queue(net);
656 DBG(dev,
"stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
657 dev->
net->stats.rx_packets, dev->
net->stats.tx_packets,
658 dev->
net->stats.rx_errors, dev->
net->stats.tx_errors
678 usb_ep_disable(link->
in_ep);
679 usb_ep_disable(link->
out_ep);
680 if (netif_carrier_ok(net)) {
681 DBG(dev,
"host still using in/out endpoints\n");
682 usb_ep_enable(link->
in_ep);
683 usb_ep_enable(link->
out_ep);
686 spin_unlock_irqrestore(&dev->
lock, flags);
/* module parameters: textual MAC addresses for the device ("self") and the
 * host side; parsed by get_ether_addr() below. module_param() lines elided. */
694 static char *dev_addr;
699 static char *host_addr;
/* get_ether_addr(): parse a 6-octet MAC from `str` into dev_addr, accepting
 * '.' or ':' separators; falls back to a random address (eth_random_addr)
 * when the string is absent/invalid. Hex-digit parsing elided. */
703 static int get_ether_addr(
const char *
str,
u8 *dev_addr)
708 for (i = 0; i < 6; i++) {
711 if ((*str ==
'.') || (*str ==
':'))
717 if (is_valid_ether_addr(dev_addr))
720 eth_random_addr(dev_addr);
/* singleton device pointer -- this driver supports one link instance */
724 static struct eth_dev *the_dev;
/* fragment of the struct net_device_ops initializer (braces elided) */
727 .ndo_open = eth_open,
728 .ndo_stop = eth_stop,
729 .ndo_start_xmit = eth_start_xmit,
730 .ndo_change_mtu = ueth_change_mtu,
/* fragments of gether_setup() (orig. lines ~755-820): allocates the
 * etherdev with struct eth_dev as private data, initializes the tx request
 * list, derives device and host MACs from the module parameters (logging
 * when a random fallback is used), and registers the netdev -- logging the
 * status via the gadget's struct device on failure. */
763 net = alloc_etherdev(
sizeof *dev);
767 dev = netdev_priv(net);
771 INIT_LIST_HEAD(&dev->tx_reqs);
780 if (get_ether_addr(dev_addr, net->
dev_addr))
782 "using random %s ethernet address\n",
"self");
783 if (get_ether_addr(host_addr, dev->
host_mac))
785 "using random %s ethernet address\n",
"host");
800 dev_dbg(&g->
dev,
"register_netdev failed, %d\n", status);
/* fragments of gether_connect() (orig. lines ~850-913): enables the IN and
 * OUT endpoints (logging each failure), preallocates qlen() requests, binds
 * the port to the device under dev->lock, and if the interface is already
 * running starts traffic immediately. Returns ERR_PTR(result) on failure;
 * teardown/unwind paths elided. */
862 result = usb_ep_enable(link->
in_ep);
864 DBG(dev,
"enable %s --> %d\n",
865 link->
in_ep->name, result);
870 result = usb_ep_enable(link->
out_ep);
872 DBG(dev,
"enable %s --> %d\n",
873 link->
out_ep->name, result);
878 result = alloc_requests(dev, link, qlen(dev->
gadget));
882 DBG(dev,
"qlen %d\n", qlen(dev->
gadget));
888 spin_lock(&dev->
lock);
891 if (netif_running(dev->
net)) {
898 spin_unlock(&dev->
lock);
901 if (netif_running(dev->
net))
913 return ERR_PTR(result);
/* fragments of gether_disconnect() (orig. lines ~925-990): stops the tx
 * queue, disables each endpoint, then frees every preallocated request on
 * dev->tx_reqs and dev->rx_reqs back to its endpoint; finally detaches the
 * port from the device under dev->lock. List-walk bodies elided. */
938 DBG(dev,
"%s\n", __func__);
940 netif_stop_queue(dev->
net);
947 usb_ep_disable(link->
in_ep);
949 while (!list_empty(&dev->tx_reqs)) {
955 usb_ep_free_request(link->
in_ep, req);
962 usb_ep_disable(link->
out_ep);
964 while (!list_empty(&dev->
rx_reqs)) {
970 usb_ep_free_request(link->
out_ep, req);
982 spin_lock(&dev->
lock);
985 spin_unlock(&dev->
lock);