1 #ifndef __NET_SCHED_GENERIC_H
2 #define __NET_SCHED_GENERIC_H
4 #include <linux/netdevice.h>
5 #include <linux/types.h>
/* Qdisc flag bits (bitmask).
 * NOTE(review): semantics inferred from names/users — confirm against the
 * qdisc core before relying on them.
 */
#define TCQ_F_BUILTIN		1		/* built-in qdisc, presumably never freed */
#define TCQ_F_INGRESS		2		/* ingress-side qdisc */
#define TCQ_F_CAN_BYPASS	4		/* fast path may bypass enqueue/dequeue */
#define TCQ_F_MQROOT		8		/* root of a multiqueue setup */
#define TCQ_F_WARN_NONWC	(1 << 16)	/* one-shot warning flag (upper bits reserved for warnings) */
88 static inline bool qdisc_is_running(
const struct Qdisc *qdisc)
93 static inline bool qdisc_run_begin(
struct Qdisc *qdisc)
95 if (qdisc_is_running(qdisc))
101 static inline void qdisc_run_end(
struct Qdisc *qdisc)
106 static inline bool qdisc_is_throttled(
const struct Qdisc *qdisc)
111 static inline void qdisc_throttled(
struct Qdisc *qdisc)
116 static inline void qdisc_unthrottled(
struct Qdisc *qdisc)
133 struct nlattr **,
unsigned long *);
229 static inline void qdisc_cb_private_validate(
const struct sk_buff *
skb,
int sz)
237 static inline int qdisc_qlen(
const struct Qdisc *
q)
249 return &qdisc->
q.lock;
252 static inline struct Qdisc *qdisc_root(
const struct Qdisc *qdisc)
257 static inline struct Qdisc *qdisc_root_sleeping(
const struct Qdisc *qdisc)
278 return qdisc_lock(root);
281 static inline spinlock_t *qdisc_root_sleeping_lock(
const struct Qdisc *qdisc)
283 struct Qdisc *
root = qdisc_root_sleeping(qdisc);
286 return qdisc_lock(root);
/* Serialize modifications of the qdisc tree that @q belongs to by taking
 * the root's sleeping lock, bottom-half safe. Pair with sch_tree_unlock().
 */
static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}
/* Release the tree lock taken by sch_tree_lock(). */
static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
/* Classifier-side convenience wrappers: lock/unlock the qdisc tree of the
 * qdisc a tcf_proto @tp is attached to (via tp->q).
 */
#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
324 static inline unsigned int qdisc_class_hash(
u32 id,
u32 mask)
338 h = qdisc_class_hash(
id, hash->
hashmask);
358 struct Qdisc *qdisc);
372 static inline void qdisc_reset_all_tx_gt(
struct net_device *
dev,
unsigned int i)
377 qdisc = netdev_get_tx_queue(dev, i)->qdisc;
379 spin_lock_bh(qdisc_lock(qdisc));
381 spin_unlock_bh(qdisc_lock(qdisc));
/* Reset the qdiscs of all of @dev's TX queues (delegates to the
 * greater-than variant starting at queue index 0).
 */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}
392 static inline bool qdisc_all_tx_empty(
const struct net_device *dev)
406 static inline bool qdisc_tx_changing(
const struct net_device *dev)
418 static inline bool qdisc_tx_is_noop(
const struct net_device *dev)
429 static inline unsigned int qdisc_pkt_len(
const struct sk_buff *skb)
#ifdef CONFIG_NET_CLS_ACT
/* A packet "stolen" by a classifier action was consumed, not dropped,
 * so it must not be counted as a drop.
 */
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
/* Without classifier actions every enqueue failure is a real drop.
 * (Restored missing #else/#endif: the two defines would otherwise
 * conflict when CONFIG_NET_CLS_ACT is set.)
 */
#define net_xmit_drop_count(e)	(1)
#endif
446 static inline void qdisc_calculate_pkt_len(
struct sk_buff *skb,
449 #ifdef CONFIG_NET_SCHED
457 static inline int qdisc_enqueue(
struct sk_buff *skb,
struct Qdisc *
sch)
459 qdisc_calculate_pkt_len(skb, sch);
463 static inline int qdisc_enqueue_root(
struct sk_buff *skb,
struct Qdisc *sch)
473 bstats->
bytes += qdisc_pkt_len(skb);
474 bstats->
packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
477 static inline void qdisc_bstats_update(
struct Qdisc *sch,
480 bstats_update(&sch->
bstats, skb);
483 static inline int __qdisc_enqueue_tail(
struct sk_buff *skb,
struct Qdisc *sch,
486 __skb_queue_tail(list, skb);
487 sch->
qstats.backlog += qdisc_pkt_len(skb);
492 static inline int qdisc_enqueue_tail(
struct sk_buff *skb,
struct Qdisc *sch)
494 return __qdisc_enqueue_tail(skb, sch, &sch->
q);
497 static inline struct sk_buff *__qdisc_dequeue_head(
struct Qdisc *sch,
500 struct sk_buff *skb = __skb_dequeue(list);
503 sch->
qstats.backlog -= qdisc_pkt_len(skb);
504 qdisc_bstats_update(sch, skb);
510 static inline struct sk_buff *qdisc_dequeue_head(
struct Qdisc *sch)
512 return __qdisc_dequeue_head(sch, &sch->
q);
515 static inline unsigned int __qdisc_queue_drop_head(
struct Qdisc *sch,
518 struct sk_buff *skb = __skb_dequeue(list);
521 unsigned int len = qdisc_pkt_len(skb);
530 static inline unsigned int qdisc_queue_drop_head(
struct Qdisc *sch)
532 return __qdisc_queue_drop_head(sch, &sch->
q);
535 static inline struct sk_buff *__qdisc_dequeue_tail(
struct Qdisc *sch,
538 struct sk_buff *skb = __skb_dequeue_tail(list);
541 sch->
qstats.backlog -= qdisc_pkt_len(skb);
546 static inline struct sk_buff *qdisc_dequeue_tail(
struct Qdisc *sch)
548 return __qdisc_dequeue_tail(sch, &sch->
q);
551 static inline struct sk_buff *qdisc_peek_head(
struct Qdisc *sch)
553 return skb_peek(&sch->
q);
557 static inline struct sk_buff *qdisc_peek_dequeued(
struct Qdisc *sch)
571 static inline struct sk_buff *qdisc_dequeue_peeked(
struct Qdisc *sch)
585 static inline void __qdisc_reset_queue(
struct Qdisc *sch,
592 __skb_queue_purge(list);
595 static inline void qdisc_reset_queue(
struct Qdisc *sch)
597 __qdisc_reset_queue(sch, &sch->
q);
601 static inline unsigned int __qdisc_queue_drop(
struct Qdisc *sch,
604 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
607 unsigned int len = qdisc_pkt_len(skb);
615 static inline unsigned int qdisc_queue_drop(
struct Qdisc *sch)
617 return __qdisc_queue_drop(sch, &sch->
q);
620 static inline int qdisc_drop(
struct sk_buff *skb,
struct Qdisc *sch)
628 static inline int qdisc_reshape_fail(
struct sk_buff *skb,
struct Qdisc *sch)
632 #ifdef CONFIG_NET_CLS_ACT
649 int slot = pktlen + rtab->
rate.cell_align + rtab->
rate.overhead;
652 slot >>= rtab->
rate.cell_log;
654 return rtab->
data[255]*(slot >> 8) + rtab->
data[slot & 0xFF];
655 return rtab->
data[slot];
658 #ifdef CONFIG_NET_CLS_ACT