#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
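/*
 * The excerpts below are from net/sched/sch_cbq.c, the Class-Based
 * Queueing (CBQ) packet scheduler after Floyd and Van Jacobson's
 * link-sharing model: a class hierarchy in which an overlimit class
 * may borrow bandwidth from an underlimit ancestor, scheduled by
 * priority rings with weighted round-robin inside each ring.
 */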
#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_ACT
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
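/*
 * L2T maps a packet length to its transmission time at the class's
 * configured rate, read from the precomputed rate table R_tab.
 */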
	clc = qdisc_class_find(&q->clhash, classid);
#ifdef CONFIG_NET_CLS_ACT
	if (new != NULL && new != this)
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		cl = (void *)res.class;
			cl = cbq_class_lookup(q, res.classid);
#ifdef CONFIG_NET_CLS_ACT
			return cbq_reclassify(skb, cl);
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
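/*
 * Classification (cbq_classify): the tc filter chain attached to each
 * class maps the skb to a subclass, and with CONFIG_NET_CLS_ACT the
 * verdict may instead be a policing action. When no filter matches,
 * head->defaults[] indexed by the packet priority supplies a fallback,
 * and split classes are resolved by reclassifying via cbq_reclassify().
 */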
static inline void cbq_activate_class(struct cbq_class *cl)
	if (cl_tail != NULL) {
static void cbq_deactivate_class(struct cbq_class *this)
	int prio = this->cpriority;
		if (cl == q->active[prio]) {
			if (cl == q->active[prio]) {
	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		now = psched_get_time();
#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
	ret = qdisc_enqueue(skb, cl->q);
		cbq_mark_toplevel(q, cl);
			cbq_activate_class(cl);
		cbq_mark_toplevel(q, cl);
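/*
 * Enqueue path: the classified packet goes to the leaf class's own
 * qdisc (cl->q); __parent is set so a reshape failure can be routed
 * back here under CONFIG_NET_CLS_ACT. On success the class is marked
 * at the top level of its borrow chain and activated on its priority
 * ring; cbq_mark_toplevel() also runs on a congestion drop so the
 * borrowing state stays consistent.
 */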
static void cbq_ovl_classic(struct cbq_class *cl)
	if (delay < base_delay) {
static void cbq_ovl_rclassic(struct cbq_class *cl)
static void cbq_ovl_delay(struct cbq_class *cl)
		       &qdisc_root_sleeping(cl->qdisc)->state))
		expires = ktime_set(0, 0);
			ktime_to_ns(ktime_sub(
static void cbq_ovl_lowprio(struct cbq_class *cl)
static void cbq_ovl_drop(struct cbq_class *cl)
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
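/*
 * Overlimit strategies (TC_CBQ_OVL_*): classic delays the class until
 * its computed undertime, rclassic appears to push the same penalty up
 * the borrow chain, delay parks the class on an hrtimer until it goes
 * underlimit again, lowprio demotes the class to a penalty priority,
 * and drop discards from the class's own queue.
 */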
			cbq_activate_class(cl);
		if (cl == q->active[prio]) {
			if (cl == q->active[prio]) {
	} while ((cl_prev = cl) != q->active[prio]);
	now = psched_get_time();
		int prio = ffz(~pmask);
		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp < delay || delay == 0)
		time = ktime_set(0, 0);
	qdisc_unthrottled(sch);
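/*
 * cbq_undelay(), the delay_timer handler: pmask carries one bit per
 * priority ring holding delayed classes, and ffz(~pmask) yields the
 * lowest set bit, i.e. the next ring to scan. The timer is re-armed
 * for the nearest remaining expiry, then the qdisc is unthrottled so
 * the device picks it up again.
 */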
#ifdef CONFIG_NET_CLS_ACT
	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		cbq_mark_toplevel(q, cl);
		cl->q->__parent = sch;
		ret = qdisc_enqueue(skb, cl->q);
			cbq_activate_class(cl);
	if (cl->q->q.qlen > 1) {
		} while ((borrowed = borrowed->borrow) != NULL);
	if ((unsigned long)idle > 128*1024*1024) {
		idle -= L2T(cl, len);
		avgidle += idle - (avgidle>>cl->ewma_log);
		idle += L2T(cl, len);
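/*
 * The estimator follows the CBQ paper: idle is the gap since the
 * previous transmission minus the expected transmit time L2T(cl, len),
 * and avgidle keeps a scaled EWMA of it with weight 2^-ewma_log; a
 * non-positive avgidle means the class sends faster than its allocated
 * rate and is overlimit.
 */
#if 0	/* Illustration only, not part of the module: a user-space
	 * sketch of the same EWMA update. The sample values are made
	 * up, standing in for (gap - L2T) in scheduler ticks, and an
	 * arithmetic right shift of negative values is assumed, as the
	 * kernel code assumes.
	 */
#include <stdio.h>

int main(void)
{
	long avgidle = 0;
	int ewma_log = 5;	/* averaging memory of roughly 2^5 samples */
	long idle[] = { 40, -10, -10, -25, -30 };

	for (unsigned int i = 0; i < sizeof(idle) / sizeof(idle[0]); i++) {
		avgidle += idle[i] - (avgidle >> ewma_log);
		printf("idle=%ld avgidle=%ld\n", idle[i], avgidle);
	}
	return 0;	/* a class with avgidle <= 0 would be overlimit */
}
#endif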
	this_cl->qstats.overlimits++;
cbq_dequeue_prio(struct Qdisc *sch, int prio)
		    (borrow = cbq_under_limit(cl)) == NULL)
			skb = cl->q->dequeue(cl->q);
			cl->deficit -= qdisc_pkt_len(skb);
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
			q->tx_len = qdisc_pkt_len(skb);
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
		cbq_activate_class(cl);
			cbq_activate_class(cl);
	} while (cl_prev != cl_tail);
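/*
 * cbq_dequeue_prio() services the round-robin ring of one priority:
 * each active class may send while its deficit (refilled from its
 * quantum) lasts, borrowing from an underlimit ancestor found by
 * cbq_under_limit() when the class itself is overlimit; a class that
 * empties or changes priority is dropped from the ring.
 */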
cbq_dequeue_1(struct Qdisc *sch)
	unsigned int activemask;
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
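/*
 * activemask mirrors which priority rings currently hold active
 * classes; clearing each serviced bit walks the rings from the lowest
 * prio number (the highest priority) upward.
 */
#if 0	/* Illustration only: ffz(~x) is the index of the least
	 * significant set bit of x. A user-space equivalent using
	 * POSIX ffs(), valid for x != 0.
	 */
#include <assert.h>
#include <strings.h>

int main(void)
{
	unsigned int activemask = (1u << 2) | (1u << 5);

	assert(ffs(activemask) - 1 == 2);	/* == ffz(~activemask) */
	return 0;
}
#endif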
cbq_dequeue(struct Qdisc *sch)
	now = psched_get_time();
		if ((incr -= incr2) < 0)
		skb = cbq_dequeue_1(sch);
			qdisc_bstats_update(sch, skb);
			qdisc_unthrottled(sch);
		sch->qstats.overlimits++;
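/*
 * Top-level dequeue: the class clock q->now first advances by the
 * modeled transmit time of the last packet (incr2) and then by
 * whatever real elapsed time remains. If no class may send, the
 * scheduler either relaxes the top-level borrowing limit and retries
 * or throttles the whole qdisc until the watchdog fires.
 */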
static void cbq_adjust_levels(struct cbq_class *this)
			if (cl->level > level)
		} while ((cl = cl->sibling) != this->children);
	this->level = level + 1;
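/*
 * A class's level is one more than the maximum level of its children
 * (leaves are level 0); cbq_adjust_levels() recomputes it bottom-up
 * along the tparent chain whenever the tree changes shape.
 */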
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
	if (q->quanta[prio] == 0)
	for (h = 0; h < q->clhash.hashsize; h++) {
				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
static void cbq_sync_defmap(struct cbq_class *cl)
	int level = split->level;
	for (h = 0; h < q->clhash.hashsize; h++) {
		splitid = split->common.classid;
	if (split == NULL || split->common.classid != splitid) {
			if (split->common.classid == splitid)
	if (cl->split != split) {
		cbq_sync_defmap(cl);
	cbq_sync_defmap(cl);
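/*
 * A split class owns the defaults[] table that maps packet priorities
 * to subclasses; cbq_sync_defmap() rebuilds that table from the defmap
 * bits of every class attached to the split, and it is re-run whenever
 * a class's split or defmap changes.
 */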
static void cbq_unlink_class(struct cbq_class *this)
	if (this->tparent) {
		clp = &this->sibling;
		} while ((cl = *clp) != this->sibling);
		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		WARN_ON(this->sibling != this);
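/*
 * Children of a class form a circular singly linked list through
 * ->sibling: unlinking walks the ring until it finds the entry that
 * points back at this class, and a class that was its own sole sibling
 * leaves its parent with children == NULL.
 */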
static void cbq_link_class(struct cbq_class *this)
static unsigned int cbq_drop(struct Qdisc *sch)
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
					cbq_deactivate_class(cl);
cbq_reset(struct Qdisc *sch)
	q->now = psched_get_time();
	for (h = 0; h < q->clhash.hashsize; h++) {
	cbq_normalize_quanta(q, cl->priority);
	cbq_normalize_quanta(q, cl->priority);
#ifdef CONFIG_NET_CLS_ACT
	if (cl->q->handle) {
			cl->q->reshape_fail = cbq_reshape_fail;
			cl->q->reshape_fail = NULL;
	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.weight = q->link.R_tab->rate.rate;
	q->link.minidle = -0x7FFFFFFF;
	q->now = psched_get_time();
	cbq_link_class(&q->link);
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_addprio(q, &q->link);
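/*
 * cbq_init() parses the TCA_CBQ_* netlink attributes and builds the
 * root "link" class to stand for the device itself: classic overlimit
 * handling, an allot of one MTU, and a weight equal to the configured
 * link rate, before the class is hashed in and LSS options applied.
 */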
	unsigned char *b = skb_tail_pointer(skb);
		goto nla_put_failure;
	unsigned char *b = skb_tail_pointer(skb);
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	unsigned char *b = skb_tail_pointer(skb);
	opt.allot = cl->allot;
		goto nla_put_failure;
	unsigned char *b = skb_tail_pointer(skb);
		goto nla_put_failure;
	unsigned char *b = skb_tail_pointer(skb);
	opt.split = cl->split ? cl->split->common.classid : 0;
		goto nla_put_failure;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char *b = skb_tail_pointer(skb);
	opt.police = cl->police;
		goto nla_put_failure;
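/*
 * Each cbq_dump_*() helper above serializes one attribute group with
 * nla_put(); on failure the skb tail is trimmed back to the saved
 * pointer b and the dump unwinds through nla_put_failure.
 */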
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	nla_nest_cancel(skb, nest);
	q->link.xstats.avgidle = q->link.avgidle;
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	nla_nest_cancel(skb, nest);
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.undertime = 0;
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
					&pfifo_qdisc_ops, cl->common.classid);
#ifdef CONFIG_NET_CLS_ACT
		new->reshape_fail = cbq_reshape_fail;
	sch_tree_unlock(sch);
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
	struct cbq_class *cl = cbq_class_lookup(q, classid);
	return (unsigned long)cl;
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
static void cbq_destroy(struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_ACT
	for (h = 0; h < q->clhash.hashsize; h++) {
	for (h = 0; h < q->clhash.hashsize; h++) {
			cbq_destroy_class(sch, cl);
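/*
 * Teardown is two-pass: the first hash walk destroys every class's
 * filter chain so no classifier can grab a fresh reference, and only
 * the second walk frees the classes through cbq_destroy_class().
 */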
static void cbq_put(struct Qdisc *sch, unsigned long arg)
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
		spin_unlock_bh(root_lock);
		cbq_destroy_class(sch, cl);
	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	    cl->tparent->common.classid != parentid)
					    qdisc_root_sleeping_lock(sch),
			cbq_deactivate_class(cl);
		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
			cbq_activate_class(cl);
		sch_tree_unlock(sch);
	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	    cbq_class_lookup(q, classid))
		for (i = 0; i < 0x8000; i++) {
		parent = cbq_class_lookup(q, parentid);
	if (tca[TCA_RATE]) {
					qdisc_root_sleeping_lock(sch),
	cl->common.classid = classid;
	cbq_adjust_levels(parent);
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);
	*arg = (unsigned long)cl;
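/*
 * cbq_change_class() handles both modify and create. An existing class
 * has its options updated in place under sch_tree_lock(); a new class
 * must supply WRR, rate, and LSS attributes, receives a classid
 * (auto-allocated when none was given), and is linked under its parent
 * before levels and quanta are recomputed.
 */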
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
	qlen = cl->q->q.qlen;
		cbq_deactivate_class(cl);
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
		cbq_sync_defmap(cl);
	sch_tree_unlock(sch);
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
	struct cbq_class *cl = cbq_class_lookup(q, classid);
	return (unsigned long)cl;
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
	for (h = 0; h < q->clhash.hashsize; h++) {
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
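/*
 * The hash walk above appears to be from cbq_walk(): it invokes the
 * caller's callback arg->fn() for each class and stops, setting
 * arg->stop, as soon as the callback returns a negative value.
 */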
	.qlen_notify	= cbq_qlen_notify,
	.change		= cbq_change_class,
	.delete		= cbq_delete,
	.tcf_chain	= cbq_find_tcf,
	.bind_tcf	= cbq_bind_filter,
	.unbind_tcf	= cbq_unbind_filter,
	.dump		= cbq_dump_class,
	.dump_stats	= cbq_dump_class_stats,
	.cl_ops		= &cbq_class_ops,
	.enqueue	= cbq_enqueue,
	.dequeue	= cbq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.destroy	= cbq_destroy,
	.dump_stats	= cbq_dump_stats,
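/*
 * cbq_class_ops and cbq_qdisc_ops wire the handlers above into the
 * generic tc machinery; the module init and exit routines below simply
 * register and unregister the "cbq" qdisc ops.
 */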
static int __init cbq_module_init(void)
static void __exit cbq_module_exit(void)