12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
16 #include <linux/string.h>
18 #include <linux/errno.h>
22 #include <linux/slab.h>
/* SFQ (Stochastic Fairness Queueing) sizing constants. */
#define SFQ_MAX_DEPTH		127
#define SFQ_DEFAULT_FLOWS	128
/* Flow indices share a 16-bit id space with the depth slots. */
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1)
#define SFQ_EMPTY_SLOT		0xffff	/* sentinel: no slot assigned */
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/*
 * Scale allot values down by 1 << SFQ_ALLOT_SHIFT, rounding up.
 * NOTE(review): presumably so packet lengths up to 64K fit in a
 * 16-bit allot field -- confirm against struct sfq_slot.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
169 qdisc_cb_private_validate(skb,
sizeof(
struct sfq_skb_cb));
182 return hash & (q->
divisor - 1);
185 static unsigned int sfq_classify(
struct sk_buff *skb,
struct Qdisc *
sch,
199 return sfq_hash(q, skb) + 1;
205 #ifdef CONFIG_NET_CLS_ACT
236 sfq_dep_head(q, n)->prev =
x;
/*
 * Remove slot x from its depth-bucket list, returning its former
 * neighbours through n and p (both are written, not read).
 * Wrapped in do { } while (0) so the four statements behave as a
 * single statement even inside an unbraced if/else.
 */
#define sfq_unlink(q, x, n, p)				\
	do {						\
		n = q->slots[x].dep.next;		\
		p = q->slots[x].dep.prev;		\
		sfq_dep_head(q, p)->next = n;		\
		sfq_dep_head(q, n)->prev = p;		\
	} while (0)
296 static inline void slot_queue_init(
struct sfq_slot *
slot)
298 memset(slot, 0,
sizeof(*slot));
303 static inline void slot_queue_add(
struct sfq_slot *slot,
struct sk_buff *skb)
311 #define slot_queue_walk(slot, skb) \
312 for (skb = slot->skblist_next; \
313 skb != (struct sk_buff *)slot; \
316 static unsigned int sfq_drop(
struct Qdisc *sch)
329 skb = q->
headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
330 len = qdisc_pkt_len(skb);
336 sch->
qstats.backlog -= len;
380 hash = sfq_classify(skb, sch, &
ret);
394 return qdisc_drop(skb, sch);
399 red_set_vars(&slot->
vars);
414 if (sfq_prob_mark(q)) {
416 if (sfq_headdrop(q) &&
418 q->
stats.prob_mark_head++;
421 if (INET_ECN_set_ce(skb)) {
422 q->
stats.prob_mark++;
426 q->
stats.prob_drop++;
427 goto congestion_drop;
431 if (sfq_hard_mark(q)) {
433 if (sfq_headdrop(q) &&
435 q->
stats.forced_mark_head++;
438 if (INET_ECN_set_ce(skb)) {
439 q->
stats.forced_mark++;
443 q->
stats.forced_drop++;
444 goto congestion_drop;
450 if (!sfq_headdrop(q))
451 return qdisc_drop(skb, sch);
454 head = slot_dequeue_head(slot);
455 delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
458 qdisc_drop(head, sch);
460 slot_queue_add(slot, skb);
465 sch->
qstats.backlog += qdisc_pkt_len(skb);
466 slot->
backlog += qdisc_pkt_len(skb);
467 slot_queue_add(slot, skb);
469 if (slot->
qlen == 1) {
484 if (++sch->
q.qlen <= q->
limit)
492 if (qlen != slot->
qlen)
501 sfq_dequeue(
struct Qdisc *sch)
515 if (slot->
allot <= 0) {
520 skb = slot_dequeue_head(slot);
522 qdisc_bstats_update(sch, skb);
524 sch->
qstats.backlog -= qdisc_pkt_len(skb);
525 slot->
backlog -= qdisc_pkt_len(skb);
527 if (slot->
qlen == 0) {
534 q->
tail->next = next_a;
542 sfq_reset(
struct Qdisc *sch)
546 while ((skb = sfq_dequeue(sch)) !=
NULL)
556 static void sfq_rehash(
struct Qdisc *sch)
565 __skb_queue_head_init(&
list);
572 skb = slot_dequeue_head(slot);
574 __skb_queue_tail(&
list, skb);
577 red_set_vars(&slot->
vars);
582 while ((skb = __skb_dequeue(&
list)) !=
NULL) {
583 unsigned int hash = sfq_hash(q, skb);
590 drop: sch->
qstats.backlog -= qdisc_pkt_len(skb);
601 slot_queue_add(slot, skb);
606 slot->
backlog += qdisc_pkt_len(skb);
608 if (slot->
qlen == 1) {
619 sch->
q.qlen -= dropped;
623 static void sfq_perturbation(
unsigned long arg)
627 spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
629 spin_lock(root_lock);
633 spin_unlock(root_lock);
647 if (opt->
nla_len < nla_attr_size(
sizeof(*ctl)))
649 if (opt->
nla_len >= nla_attr_size(
sizeof(*ctl_v1)))
650 ctl_v1 = nla_data(opt);
654 if (ctl_v1 && ctl_v1->
qth_min) {
692 while (sch->
q.qlen > q->
limit)
701 sch_tree_unlock(sch);
706 static void *sfq_alloc(
size_t sz)
715 static void sfq_free(
void *
addr)
718 if (is_vmalloc_addr(addr))
725 static void sfq_destroy(
struct Qdisc *sch)
737 static int sfq_init(
struct Qdisc *sch,
struct nlattr *opt)
757 q->
quantum = psched_mtu(qdisc_dev(sch));
763 int err = sfq_change(sch, opt);
774 for (i = 0; i < q->
divisor; i++)
778 slot_queue_init(&q->
slots[i]);
788 static int sfq_dump(
struct Qdisc *sch,
struct sk_buff *skb)
791 unsigned char *
b = skb_tail_pointer(skb);
795 memset(&opt, 0,
sizeof(opt));
798 opt.v0.limit = q->
limit;
810 opt.max_P = p->
max_P;
813 opt.flags = q->
flags;
816 goto nla_put_failure;
825 static struct Qdisc *sfq_leaf(
struct Qdisc *sch,
unsigned long arg)
830 static unsigned long sfq_get(
struct Qdisc *sch,
u32 classid)
835 static unsigned long sfq_bind(
struct Qdisc *sch,
unsigned long parent,
843 static void sfq_put(
struct Qdisc *q,
unsigned long cl)
856 static int sfq_dump_class(
struct Qdisc *sch,
unsigned long cl,
863 static int sfq_dump_class_stats(
struct Qdisc *sch,
unsigned long cl,
891 for (i = 0; i < q->
divisor; i++) {
897 if (arg->
fn(sch, i + 1, arg) < 0) {
909 .tcf_chain = sfq_find_tcf,
910 .bind_tcf = sfq_bind,
911 .unbind_tcf = sfq_put,
912 .dump = sfq_dump_class,
913 .dump_stats = sfq_dump_class_stats,
918 .cl_ops = &sfq_class_ops,
921 .enqueue = sfq_enqueue,
922 .dequeue = sfq_dequeue,
923 .peek = qdisc_peek_dequeued,
927 .destroy = sfq_destroy,
933 static int __init sfq_module_init(
void)
937 static void __exit sfq_module_exit(
void)