#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#define HTB_VER 0x30011

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
MODULE_PARM_DESC(htb_hysteresis,
		 "Hysteresis mode, less CPU load, less accurate");
struct htb_class_leaf {

struct htb_class_inner {
#define HTB_WARN_TOOMANYEVENTS	0x1

clc = qdisc_class_find(&q->clhash, handle);

#define HTB_DIRECT ((struct htb_class *)-1L)	/* pseudo-class: bypass classification and use the direct queue */
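/*
 * htb_classify() picks the leaf class for an skb: skb->priority equal to
 * the qdisc handle selects the direct queue, a valid leaf classid in
 * skb->priority selects that leaf, otherwise the attached tc filters
 * decide; if nothing matches (or the match is not a leaf) the default
 * class is used, falling back to HTB_DIRECT.
 */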
if (cl && cl->level == 0)

while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT

	cl = (void *)res.class;

	cl = htb_find(res.classid, sch);

if (!cl || cl->level)
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
static inline void htb_next_rb_node(struct rb_node **n)
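/*
 * "Rows": q->row[level][prio] is an rb-tree of the classes that are
 * active (have packets queued and are allowed to send) at that level and
 * priority, and q->row_mask[level] caches which priorities are non-empty.
 * The two helpers below add or remove a class for every priority bit set
 * in its mask.
 */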
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)

	int prio = ffz(~mask);
	mask &= ~(1 << prio);
	htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)

	int prio = ffz(~mask);
	mask &= ~(1 << prio);

	htb_next_rb_node(q->ptr[cl->level] + prio);
	htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
	if (!q->row[cl->level][prio].rb_node)
if (p->un.inner.feed[prio].rb_node)

	mask &= ~(1 << prio);

htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);

htb_add_class_to_row(q, cl, mask);
htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

if (!p->un.inner.feed[prio].rb_node)

htb_remove_class_from_row(q, cl, mask);
static inline long htb_lowater(const struct htb_class *cl)

static inline long htb_hiwater(const struct htb_class *cl)
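/*
 * Class modes (enum htb_cmode): if the class's ceil tokens (ctokens)
 * would fall below the low-water mark it is HTB_CANT_SEND; if its rate
 * tokens are at or above the high-water mark it is HTB_CAN_SEND; in
 * between it is HTB_MAY_BORROW and may only borrow bandwidth from its
 * parent. htb_lowater()/htb_hiwater() widen these thresholds when the
 * htb_hysteresis module parameter is set, trading accuracy for CPU.
 */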
htb_class_mode(struct htb_class *cl, long *diff)

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
enum htb_cmode new_mode = htb_class_mode(cl, diff);

if (new_mode == cl->cmode)

	htb_deactivate_prios(q, cl);
	cl->cmode = new_mode;
	htb_activate_prios(q, cl);

	cl->cmode = new_mode;
htb_activate_prios(q, cl);

htb_deactivate_prios(q, cl);
list_del_init(&cl->un.leaf.drop_list);

return qdisc_drop(skb, sch);
#ifdef CONFIG_NET_CLS_ACT
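/*
 * Token accounting: every class keeps two buckets, "tokens" refilled at
 * the configured rate and "ctokens" refilled at the ceil rate. When a
 * packet is charged, the helpers below add the elapsed time (diff) and
 * subtract the packet's transmission time as looked up via qdisc_l2t()
 * in the corresponding rate table, clamping to the configured burst
 * buffer.
 */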
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)

	long toks = diff + cl->tokens;

	toks -= (long) qdisc_l2t(cl->rate, bytes);
static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)

	long toks = diff + cl->ctokens;

	toks -= (long) qdisc_l2t(cl->ceil, bytes);
int bytes = qdisc_pkt_len(skb);

if (cl->level >= level) {
	if (cl->level == level)

	htb_accnt_tokens(cl, bytes, diff);

	htb_accnt_ctokens(cl, bytes, diff);

old_mode = cl->cmode;

htb_change_class_mode(q, cl, &diff);
if (old_mode != cl->cmode) {

	htb_add_to_wait_tree(q, cl, diff);

bstats_update(&cl->bstats, skb);
unsigned long stop_at = start + 2;

htb_safe_rb_erase(p, q->wait_pq + level);

htb_change_class_mode(q, cl, &diff);

htb_add_to_wait_tree(q, cl, diff);
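/*
 * Active classes hang off per-(level, priority) rb-trees keyed by
 * classid: the global rows plus each inner class's "feed" trees.
 * htb_lookup_leaf() walks down from a row to the next leaf in
 * round-robin order, using last_ptr_id to resume after the current
 * class when its rb_node has been removed in the meantime.
 */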
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)

	if (id > cl->common.classid) {
for (i = 0; i < 65535; i++) {
	if (!*sp->pptr && *sp->pid) {

		htb_id_find_next_upper(prio, sp->root, *sp->pid);

		*sp->pptr = sp->root;
		while ((*sp->pptr)->rb_left)
			*sp->pptr = (*sp->pptr)->rb_left;

	htb_next_rb_node(sp->pptr);

	(++sp)->root = cl->un.inner.feed[prio].rb_node;
	sp->pptr = cl->un.inner.ptr + prio;
	sp->pid = cl->un.inner.last_ptr_id + prio;
start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
			     q->ptr[level] + prio,
			     q->last_ptr_id[level] + prio);
htb_deactivate(q, cl);

if ((q->row_mask[level] & (1 << prio)) == 0)
next = htb_lookup_leaf(q->row[level] + prio,
		       prio, q->ptr[level] + prio,
		       q->last_ptr_id[level] + prio);
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
cl = htb_lookup_leaf(q->row[level] + prio, prio,
		     q->ptr[level] + prio,
		     q->last_ptr_id[level] + prio);

} while (cl != start);
bstats_update(&cl->bstats, skb);

if (cl->un.leaf.deficit[level] < 0) {
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
if (!cl->un.leaf.q->q.qlen)
	htb_deactivate(q, cl);

htb_charge_class(q, cl, level, skb);
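/*
 * htb_dequeue() tries the direct queue first, then scans levels from 0
 * (leaves) upward and, within a level, the priorities flagged in
 * row_mask. Before each level is scanned, htb_do_events() processes
 * classes whose mode-change timers (wait_pq) have expired.
 */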
unsigned long start_at;

qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);

q->now = psched_get_time();

event = htb_do_events(q, level, start_at);

if (next_event > event)

while (m != (int)(-1)) {

	skb = htb_dequeue_tree(q, prio, level);
static unsigned int htb_drop(struct Qdisc *sch)

	if (cl->un.leaf.q->ops->drop &&

	if (!cl->un.leaf.q->q.qlen)
		htb_deactivate(q, cl);
static void htb_reset(struct Qdisc *sch)

	for (i = 0; i < q->clhash.hashsize; i++) {

		INIT_LIST_HEAD(&cl->un.leaf.drop_list);

	INIT_LIST_HEAD(q->drops + i);
err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);

	pr_err("HTB: hey probably you have bad tc tool ?\n");

	pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",

INIT_LIST_HEAD(q->drops + i);
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)

	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);

		goto nla_put_failure;
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);

	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);

		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.ceil = cl->ceil->rate;
	opt.prio = cl->prio;
	opt.level = cl->level;

		goto nla_put_failure;

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);

	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
	sch_tree_unlock(sch);
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
static unsigned long htb_get(struct Qdisc *sch, u32 classid)

	struct htb_class *cl = htb_find(classid, sch);

	return (unsigned long)cl;
static inline int htb_parent_last_child(struct htb_class *cl)

	if (cl->parent->children > 1)

			       struct Qdisc *new_q)

	INIT_LIST_HEAD(&parent->un.leaf.drop_list);

	parent->t_c = psched_get_time();
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)

static void htb_destroy(struct Qdisc *sch)

	for (i = 0; i < q->clhash.hashsize; i++) {

	for (i = 0; i < q->clhash.hashsize; i++) {

		htb_destroy_class(sch, cl);

	__skb_queue_purge(&q->direct_queue);
static int htb_delete(struct Qdisc *sch, unsigned long arg)

	if (!cl->level && htb_parent_last_child(cl)) {

					  cl->parent->common.classid);

	qlen = cl->un.leaf.q->q.qlen;

		htb_deactivate(q, cl);

		htb_parent_to_leaf(q, cl, new_q);

	sch_tree_unlock(sch);
static void htb_put(struct Qdisc *sch, unsigned long arg)

	htb_destroy_class(sch, cl);
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);

parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

struct Qdisc *new_q;

	.nla_len = nla_attr_size(sizeof(est.opt)),

    htb_find(classid, sch))

if (parent && parent->parent && parent->parent->level < 2) {
	pr_err("htb: tree is too deep\n");

			qdisc_root_sleeping_lock(sch),

INIT_LIST_HEAD(&cl->un.leaf.drop_list);

				  &pfifo_qdisc_ops, classid);

if (parent && !parent->level) {
	unsigned int qlen = parent->un.leaf.q->q.qlen;

	htb_deactivate(q, parent);

cl->common.classid = classid;

cl->t_c = psched_get_time();

			qdisc_root_sleeping_lock(sch),

	"HTB: quantum of class %X is small. Consider r2q change.\n",

	"HTB: quantum of class %X is big. Consider r2q change.\n",

if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
	cl->prio = TC_HTB_NUMPRIO - 1;

sch_tree_unlock(sch);

*arg = (unsigned long)cl;
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
	struct htb_class *cl = htb_find(classid, sch);

	return (unsigned long)cl;

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)

	for (i = 0; i < q->clhash.hashsize; i++) {

			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
.qlen_notify	= htb_qlen_notify,
.change		= htb_change_class,
.delete		= htb_delete,
.tcf_chain	= htb_find_tcf,
.bind_tcf	= htb_bind_filter,
.unbind_tcf	= htb_unbind_filter,
.dump		= htb_dump_class,
.dump_stats	= htb_dump_class_stats,
.cl_ops		= &htb_class_ops,
.enqueue	= htb_enqueue,
.dequeue	= htb_dequeue,
.peek		= qdisc_peek_dequeued,
.destroy	= htb_destroy,
static int __init htb_module_init(void)

static void __exit htb_module_exit(void)