#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/list.h>
#include <linux/slab.h>
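/*
 * In dequeue_skb(): before a previously requeued skb is handed back to
 * the driver, the tx queue it is mapped to is checked, without taking
 * the tx lock, for being frozen or stopped.
 */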
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        if (!netif_xmit_frozen_or_stopped(txq)) {
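/*
 * handle_dev_cpu_collision(): the driver's tx lock was already held.
 * If the owner is this very CPU, hard_start_xmit() has recursed; the
 * skb is dropped and a dead-loop warning is emitted. Otherwise another
 * CPU is transmitting, so the skb is requeued and tried again later.
 */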
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)

                                     dev_queue->dev->name);

                ret = dev_requeue_skb(skb, q);
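/*
 * sch_direct_xmit(): transmit one skb. The qdisc root lock is dropped
 * around the actual driver transmit and retaken afterwards to classify
 * the driver's return code: completed, tx-lock collision, or requeue.
 * Returns 0 when the queue is empty or throttled, >0 otherwise.
 */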
        spin_unlock(root_lock);

        if (!netif_xmit_frozen_or_stopped(txq))

        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {

                ret = handle_dev_cpu_collision(skb, txq, q);

                                     dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);

        if (ret && netif_xmit_frozen_or_stopped(txq))
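/*
 * qdisc_restart(): dequeue one packet from the qdisc and hand it to
 * sch_direct_xmit() together with the root lock and the tx queue the
 * skb is mapped to.
 */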
static inline int qdisc_restart(struct Qdisc *q)

        skb = dequeue_skb(q);

        root_lock = qdisc_lock(q);

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
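/*
 * __qdisc_run(): keep calling qdisc_restart() until the queue drains,
 * but yield once the packet quota is spent or another task needs the
 * CPU; the remainder is rescheduled via softirq.
 */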
        while (qdisc_restart(q)) {

                if (--quota <= 0 || need_resched()) {
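/*
 * dev_trans_start(): the device-level transmit timestamp is the most
 * recent trans_start of any of its tx queues.
 */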
                val = netdev_get_tx_queue(dev, i)->trans_start;
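/*
 * dev_watchdog(): periodic timer that runs while the device is up.
 * If any stopped tx queue has gone too long without a transmit, the
 * driver's tx-timeout handler is invoked.
 */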
static void dev_watchdog(unsigned long arg)

        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;

                                txq = netdev_get_tx_queue(dev, i);

                                if (netif_xmit_stopped(txq) &&

                                        some_queue_timedout = 1;

                        if (some_queue_timedout) {

        netif_tx_unlock(dev);
static void dev_watchdog_up(struct net_device *dev)

static void dev_watchdog_down(struct net_device *dev)

        netif_tx_lock_bh(dev);

        netif_tx_unlock_bh(dev);
                if (dev->reg_state == NETREG_UNINITIALIZED)

                if (netif_running(dev))
                if (dev->reg_state == NETREG_UNINITIALIZED)
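/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */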
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .peek = noop_dequeue,
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,

        .dev_queue = &noop_netdev_queue,
static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {

        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .peek = noop_dequeue,
static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
        .qdisc = &noqueue_qdisc,
        .qdisc_sleeping = &noqueue_qdisc,
static struct Qdisc noqueue_qdisc = {

        .dequeue = noop_dequeue,

        .ops = &noqueue_qdisc_ops,

        .dev_queue = &noqueue_netdev_queue,
static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
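/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */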
#define PFIFO_FAST_BANDS 3
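/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *      bitmap=0 means there are no skbs on any band.
 *      bitmap=1 means there is an skb on band 0.
 *      bitmap=7 means there are skbs on all 3 bands.
 */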
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
        return priv->q + band;
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)

        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {

                priv->bitmap |= (1 << band);

                return __qdisc_enqueue_tail(skb, qdisc, list);

        return qdisc_drop(skb, qdisc);
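/*
 * pfifo_fast_dequeue(): bitmap2band yields the highest-priority
 * (lowest-numbered) non-empty band, or -1 if the qdisc is empty; once
 * a band drains, its bit is cleared from the bitmap.
 */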
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)

        int band = bitmap2band[priv->bitmap];

                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)

        int band = bitmap2band[priv->bitmap];

                return skb_peek(list);
static void pfifo_fast_reset(struct Qdisc *qdisc)

                __qdisc_reset_queue(qdisc, band2list(priv, prio));

        qdisc->qstats.backlog = 0;
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)

                goto nla_put_failure;
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)

                skb_queue_head_init(band2list(priv, prio));
        .enqueue = pfifo_fast_enqueue,
        .dequeue = pfifo_fast_dequeue,
        .peek = pfifo_fast_peek,
        .init = pfifo_fast_init,
        .reset = pfifo_fast_reset,
        .dump = pfifo_fast_dump,
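/*
 * qdisc_alloc(): the Qdisc and its private data are allocated as one
 * NUMA-aware chunk; if the first allocation comes back misaligned, a
 * larger chunk is requested and the alignment offset is remembered in
 * sch->padded so the original pointer can be recovered on free.
 */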
                         netdev_queue_numa_node_read(dev_queue));

                                 netdev_queue_numa_node_read(dev_queue));

                sch->padded = (char *) sch - (char *) p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
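/*
 * qdisc_destroy(): drop the module and device references taken at
 * creation time; under CONFIG_NET_SCHED the qdisc is also unlinked
 * from the per-device qdisc list.
 */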
#ifdef CONFIG_NET_SCHED

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));
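/*
 * dev_graft_qdisc(): swap the sleeping qdisc of a tx queue under the
 * root lock of the old qdisc, held with bottom halves disabled.
 */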
        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        spin_unlock_bh(root_lock);
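/*
 * attach_one_default_qdisc(): per-tx-queue callback that picks the
 * built-in noqueue_qdisc by default, replacing it with a freshly
 * created pfifo_fast instance for devices that actually queue packets.
 */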
static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc = &noqueue_qdisc;

                        netdev_info(dev, "activation failed\n");
static void attach_default_qdiscs(struct net_device *dev)

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);

                        qdisc->ops->attach(qdisc);
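/*
 * transition_one_qdisc(): publish the sleeping qdisc as the active one
 * for a tx queue. The watchdog flag is raised only for real qdiscs;
 * the built-in noqueue_qdisc needs no tx-timeout supervision.
 */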
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)

        int *need_watchdog_p = _need_watchdog;

        if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {

                *need_watchdog_p = 1;
        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
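                /* Delay activation until next carrier-on event */
                return;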
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

                dev_watchdog_up(dev);
static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;

        qdisc = dev_queue->qdisc;

                spin_lock_bh(qdisc_lock(qdisc));

                spin_unlock_bh(qdisc_lock(qdisc));
static bool some_qdisc_is_busy(struct net_device *dev)

                dev_queue = netdev_get_tx_queue(dev, i);

                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||

                spin_unlock_bh(root_lock);
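/*
 * dev_deactivate_many(): detach the qdiscs of several devices, then
 * spin until no qdisc is still running before returning, so callers
 * can safely tear the devices down.
 */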
        bool sync_needed = false;

                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);

                while (some_qdisc_is_busy(dev))
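/*
 * dev_init_scheduler_queue(): install the given qdisc (noop_qdisc at
 * init time) as both the active and the sleeping qdisc of a queue.
 */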
static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        dev_queue->qdisc = qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
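/*
 * shutdown_scheduler_queue(): detach and destroy the sleeping qdisc of
 * a queue, pointing the queue back at the supplied default qdisc.
 */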
static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)

        struct Qdisc *qdisc_default = _qdisc_default;

        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);