#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <asm/unaligned.h>
#define MAX_UDP_CHUNK 1460

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2
#define MAX_SKB_SIZE						\
	(sizeof(struct ethhdr) +				\
	 sizeof(struct iphdr) +					\
	 sizeof(struct udphdr) +				\
	 MAX_UDP_CHUNK)
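/*
 * Worked example, assuming a plain Ethernet header with no VLAN tag:
 * sizeof(struct ethhdr) = 14, sizeof(struct iphdr) = 20 and
 * sizeof(struct udphdr) = 8, so MAX_SKB_SIZE = 14 + 20 + 8 + 1460 = 1502
 * bytes, i.e. one full MAX_UDP_CHUNK payload plus its headers.
 */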
static void zap_completion_queue(void);
static unsigned int carrier_timeout = 4;	/* seconds to wait for carrier */
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
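/*
 * A minimal sketch of how a netpoll client (netconsole-style) typically
 * drives this API; the name, addresses and port numbers below are
 * illustrative, not taken from this file:
 *
 *	static struct netpoll np = {
 *		.name        = "myclient",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, msg, msg_len);
 */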
		if (!netif_device_present(dev) || !netif_running(dev)) {

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			__netif_tx_unlock(txq);

		__netif_tx_unlock(txq);
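/*
 * checksum_udp(): a UDP checksum of 0 means "no checksum transmitted"
 * for IPv4 (RFC 768), so such packets are accepted as-is, as is anything
 * the hardware has already verified (skb_csum_unnecessary()).
 */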
	if (uh->check == 0 || skb_csum_unnecessary(skb))

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	return budget - work;
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			spin_unlock(&napi->poll_lock);

			netpoll_arp_reply(skb, npi);
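/*
 * netpoll_poll_dev() manually drives the NIC with interrupts disabled:
 * it asks the driver to poll via ndo_poll_controller(), services any
 * ARP requests queued while the stack was trapped, and reaps the tx
 * completion queue so freed skbs can replenish the emergency pool.
 */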
static void netpoll_poll_dev(struct net_device *dev)

	if (!dev || !netif_running(dev))

	if (!ops->ndo_poll_controller)

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	service_arp_queue(ni);

	zap_completion_queue();
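/*
 * refill_skbs() tops up the static emergency pool under skb_pool.lock,
 * so that messages can still go out when regular GFP_ATOMIC
 * allocations fail (e.g. while printing an OOM or panic message).
 */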
static void refill_skbs(void)

	spin_unlock_irqrestore(&skb_pool.lock, flags);
static void zap_completion_queue(void)

		while (clist != NULL) {
	zap_completion_queue();

			netpoll_poll_dev(np->dev);

	skb_reserve(skb, reserve);
static int netpoll_owner_active(struct net_device *dev)
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {

	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {

						txq_trans_update(txq);

				__netif_tx_unlock(txq);

			netpoll_poll_dev(np->dev);
364 "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);

	skb_copy_to_linear_data(skb, msg, len);

	skb_reset_transport_header(skb);

	udph->len = htons(udp_len);

	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_reset_network_header(skb);

	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb_reset_mac_header(skb);

	netpoll_send_skb(np, skb);
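/*
 * netpoll_arp_reply() answers ARP requests for a netpoll-managed local
 * IP directly from this context, since the normal ARP stack may not get
 * to run while netpoll has the receive path trapped.
 */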
	unsigned char *arp_ptr;

	if (list_empty(&npinfo->rx_np))

	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	arp_ptr = (unsigned char *)(arp + 1);

	arp_ptr += skb->dev->addr_len;

	arp_ptr += skb->dev->addr_len;

	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))

	size = arp_hdr_len(skb->dev);

		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);

		skb_reset_network_header(send_skb);

		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {

		arp_ptr = (unsigned char *)(arp + 1);

		arp_ptr += np->dev->addr_len;

		arp_ptr += np->dev->addr_len;

		netpoll_send_skb(np, send_skb);

	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
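/*
 * The receive path below validates the IP header (length, version,
 * header checksum), trims any link-layer padding with
 * pskb_trim_rcsum(), verifies the UDP checksum and, on a match, hands
 * the payload to the registered rx_hook.
 */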
	const struct iphdr *iph;

	if (list_empty(&npinfo->rx_np))

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))

	if (iph->ihl < 5 || iph->version != 4)

	if (!pskb_may_pull(skb, iph->ihl*4))

	if (skb->len < len || len < iph->ihl*4)

	if (pskb_trim_rcsum(skb, len))

	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);

	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))

		/* hand over the payload, minus the 8-byte UDP header */
		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh + 1),
			    ulen - sizeof(struct udphdr));
	if (*cur == ' ' || *cur == '\t')
		np_info(np, "warning: whitespace is not allowed\n");

	np_info(np, "couldn't parse config at '%s'!\n", cur);
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);

		npinfo = kmalloc(sizeof(*npinfo), gfp);

		INIT_LIST_HEAD(&npinfo->rx_np);

		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);

		npinfo = ndev->npinfo;

		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
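/*
 * netpoll_setup() is the blocking front end: it may have to bring the
 * interface up, wait (up to carrier_timeout seconds) for carrier, and
 * pick a local IP from the device before handing off to
 * __netpoll_setup() above.
 */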
	struct in_device *in_dev;

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

			np_err(np, "failed to open %s\n", ndev->name);

		while (!netif_carrier_ok(ndev)) {

				np_notice(np, "timeout waiting for carrier\n");

			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");

		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {

			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);

		np->local_ip = in_dev->ifa_list->ifa_local;
static int __init netpoll_init(void)
	__skb_queue_purge(&npinfo->txq);
	npinfo = np->dev->npinfo;

	if (!list_empty(&npinfo->rx_np)) {

		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;

		spin_unlock_irqrestore(&npinfo->rx_lock, flags);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);
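/*
 * Teardown is reference counted: only when the last netpoll user of the
 * device goes away does the driver's ndo_netpoll_cleanup() run and the
 * netpoll_info get freed; the tx queue purge above happens in the
 * RCU callback, once in-flight readers are done with npinfo.
 */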