19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
24 #include <linux/random.h>
/*
 * SFB (Stochastic Fair Blue) hashes each packet into one bucket per level,
 * using a different hash perturbation at every level.
 *
 * SFB_BUCKET_SHIFT: log2 of the number of buckets per level.
 * SFB_NUMBUCKETS:   buckets per level (1 << SFB_BUCKET_SHIFT == 16).
 * SFB_BUCKET_MASK:  mask to extract one bucket index from a 32-bit hash.
 * SFB_LEVELS:       how many SFB_BUCKET_SHIFT-bit slices fit in a 32-bit
 *                   hash value (32 / 4 == 8 levels).
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT)
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS (32 / SFB_BUCKET_SHIFT)
97 qdisc_cb_private_validate(skb,
sizeof(
struct sfb_skb_cb));
123 return p1 > p2 ? p1 - p2 : 0;
135 if (b[hash].
qlen < 0xFFFF)
145 sfbhash = sfb_hash(skb, 0);
147 increment_one_qlen(sfbhash, 0, q);
149 sfbhash = sfb_hash(skb, 1);
151 increment_one_qlen(sfbhash, 1, q);
154 static void decrement_one_qlen(
u32 sfbhash,
u32 slot,
164 if (b[hash].
qlen > 0)
174 sfbhash = sfb_hash(skb, 0);
176 decrement_one_qlen(sfbhash, 0, q);
178 sfbhash = sfb_hash(skb, 1);
180 decrement_one_qlen(sfbhash, 1, q);
204 u32 qlen = 0, prob = 0, totalpm = 0;
228 sfb_init_perturbation(q->
slot, q);
257 int *qerr,
u32 *salt)
264 #ifdef CONFIG_NET_CLS_ACT
293 q->
stats.queuedrop++;
311 if (!sfb_classify(skb, q, &ret, &salt))
325 q->
bins[slot].perturbation);
336 decrement_prob(b, q);
338 increment_prob(b, q);
339 if (minqlen > b->
qlen)
350 q->
stats.bucketdrop++;
360 q->
bins[slot].perturbation);
371 decrement_prob(b, q);
373 increment_prob(b, q);
376 if (sfb_rate_limit(skb, q)) {
378 q->
stats.penaltydrop++;
393 q->
stats.earlydrop++;
397 if (INET_ECN_set_ce(skb)) {
400 q->
stats.earlydrop++;
406 ret = qdisc_enqueue(skb, child);
409 increment_qlen(skb, q);
411 q->
stats.childdrop++;
417 qdisc_drop(skb, sch);
435 qdisc_bstats_update(sch, skb);
437 decrement_qlen(skb, q);
448 return child->
ops->peek(child);
453 static void sfb_reset(
struct Qdisc *sch)
461 sfb_zero_all_buckets(q);
462 sfb_init_perturbation(0, q);
465 static void sfb_destroy(
struct Qdisc *sch)
477 static const struct tc_sfb_qopt sfb_default_ops = {
499 err = nla_parse_nested(tb,
TCA_SFB_MAX, opt, sfb_policy);
511 limit =
max_t(
u32, qdisc_dev(sch)->tx_queue_len, 1);
515 return PTR_ERR(child);
538 sfb_zero_all_buckets(q);
539 sfb_init_perturbation(0, q);
540 sfb_init_perturbation(1, q);
542 sch_tree_unlock(sch);
547 static int sfb_init(
struct Qdisc *sch,
struct nlattr *opt)
552 return sfb_change(sch, opt);
555 static int sfb_dump(
struct Qdisc *sch,
struct sk_buff *skb)
574 goto nla_put_failure;
576 goto nla_put_failure;
577 return nla_nest_end(skb, opts);
580 nla_nest_cancel(skb, opts);
588 .earlydrop = q->
stats.earlydrop,
589 .penaltydrop = q->
stats.penaltydrop,
590 .bucketdrop = q->
stats.bucketdrop,
591 .queuedrop = q->
stats.queuedrop,
592 .childdrop = q->
stats.childdrop,
593 .marked = q->
stats.marked,
601 static int sfb_dump_class(
struct Qdisc *sch,
unsigned long cl,
607 static int sfb_graft(
struct Qdisc *sch,
unsigned long arg,
struct Qdisc *
new,
620 sch_tree_unlock(sch);
624 static struct Qdisc *sfb_leaf(
struct Qdisc *sch,
unsigned long arg)
631 static unsigned long sfb_get(
struct Qdisc *sch,
u32 classid)
636 static void sfb_put(
struct Qdisc *sch,
unsigned long arg)
640 static int sfb_change_class(
struct Qdisc *sch,
u32 classid,
u32 parentid,
646 static int sfb_delete(
struct Qdisc *sch,
unsigned long cl)
655 if (walker->
fn(sch, 1, walker) < 0) {
672 static unsigned long sfb_bind(
struct Qdisc *sch,
unsigned long parent,
684 .change = sfb_change_class,
685 .delete = sfb_delete,
687 .tcf_chain = sfb_find_tcf,
688 .bind_tcf = sfb_bind,
689 .unbind_tcf = sfb_put,
690 .dump = sfb_dump_class,
696 .cl_ops = &sfb_class_ops,
697 .enqueue = sfb_enqueue,
698 .dequeue = sfb_dequeue,
702 .destroy = sfb_destroy,
703 .change = sfb_change,
705 .dump_stats = sfb_dump_stats,
709 static int __init sfb_module_init(
void)
714 static void __exit sfb_module_exit(
void)