#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
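/* GRED_DEF_PRIO is the priority assigned to a virtual queue (DP) that is
 * configured without one; GRED_VQ_MASK extracts the DP index from
 * skb->tc_index (tc_index_to_dp()). Both derive from MAX_DPs, the size of
 * the VQ table. */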
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
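/* WRED mode is selected when at least two virtual queues share the same
 * priority: scan the table and return 1 on the first duplicate found. */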
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}
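/* In WRED mode all VQs share the qdisc-wide backlog; otherwise each VQ
 * accounts its own. */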
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
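/* In WRED mode the RED state (average queue length, idle period start) is
 * shared in table->wred_set; these helpers copy it into and out of the
 * per-VQ RED variables around each update. */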
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
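/* Red-flag helpers: TC_RED_ECN allows marking instead of dropping,
 * TC_RED_HARDDROP forces a drop even when the packet could be marked. */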
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}
static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
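/* Enqueue path (gred_enqueue): map the packet to its virtual queue via
 * skb->tc_index, update the RED average and mark or drop on congestion. */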
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);
	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
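		/* No VQ matches and no usable default DP: pass the packet
		 * straight through while the queue stays below the device's
		 * tx_queue_len. */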
		if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
			return qdisc_enqueue_tail(skb, sch);
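	/* RIO mode: add in the averages of all non-idling VQs with a lower
	 * prio value (i.e. higher priority) before running RED. */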
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}
	q->bytesin += qdisc_pkt_len(skb);
	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);
	q->vars.qavg = red_calc_qavg(&q->parms, &q->vars,
				     gred_backlog(t, q, sch));
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);
	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);
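	/* Act on the RED verdict: probabilistic or forced marking when ECN
	 * is enabled and the packet is ECN-capable, otherwise drop. */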
	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_PROB_MARK:
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;
	case RED_HARD_MARK:
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
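/* Dequeue path (gred_dequeue): charge the departing packet back to its VQ
 * and start the RED idle period once the relevant backlog drains. */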
	skb = qdisc_dequeue_head(sch);

	if (skb) {
		u16 dp = tc_index_to_dp(skb);
		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
					     tc_index_to_dp(skb));
			q->backlog -= qdisc_pkt_len(skb);
			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
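/* ->drop hook (gred_drop): remove a packet from the tail on overflow and
 * credit the owning VQ's backlog. */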
static unsigned int gred_drop(struct Qdisc *sch)
	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		u16 dp = tc_index_to_dp(skb);
		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
					     tc_index_to_dp(skb));
			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		qdisc_drop(skb, sch);
		return len;
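/* gred_reset(): flush the queue and restart the RED state of every
 * configured VQ. */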
static void gred_reset(struct Qdisc *sch)
	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
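/* Table-wide setup from a TCA_GRED_DPS attribute: number of DPs, default
 * DP and flags; pick RIO/WRED mode and destroy VQs that are now out of
 * range ("shadowed"). */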
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
	sopt = nla_data(dps);

	sch_tree_unlock(sch);
	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}
374 "shadowed VQ 0x%x\n", i);
375 gred_destroy_vq(table->
tab[i]);
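/* Create or update a single virtual queue from a tc_gred_qopt; *prealloc
 * is consumed when the VQ does not exist yet. */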
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
		table->tab[dp] = q = *prealloc;
		red_end_of_idle_period(&q->vars);
	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
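/* gred_change(): parse the nested TCA_GRED_* attributes; when only the
 * table-wide attribute is present, fall back to gred_change_table_def(). */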
	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
		return gred_change_table_def(sch, opt);
	if (ctl->DP >= table->DPs)
	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			       "setting default to %d\n", ctl->DP, def_prio);
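	/* The replacement VQ is allocated with GFP_KERNEL outside the qdisc
	 * tree lock and handed to gred_change_vq() through &prealloc. */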
	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}
	sch_tree_unlock(sch);
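/* gred_init(): at qdisc creation time only the table-wide setup is
 * accepted; per-VQ parameters arrive later via gred_change(). */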
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
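/* gred_dump(): emit the table-wide sopt, the per-VQ max_P array and one
 * tc_gred_qopt per possible DP. */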
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
		.def_DP = table->def,
		.grio = gred_rio_mode(table),
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;
	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;
	if (parms == NULL)
		goto nla_put_failure;
	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));
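		/* No VQ at this DP: a DP value of MAX_DPs + i tells tc that
		 * the slot is unconfigured. */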
			opt.DP = MAX_DPs + i;
		opt.limit = q->limit;
		opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog = q->parms.Wlog;
		opt.Plog = q->parms.Plog;
		opt.Scell_log = q->parms.Scell_log;
		opt.other = q->stats.other;
		opt.early = q->stats.prob_drop;
		opt.forced = q->stats.forced_drop;
		opt.pdrop = q->stats.pdrop;
		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);
		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);
nla_put_failure:
	nla_nest_cancel(skb, opts);
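/* gred_destroy(): free every configured virtual queue. */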
static void gred_destroy(struct Qdisc *sch)
	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
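/* Handlers wired into GRED's struct Qdisc_ops. */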
	.enqueue = gred_enqueue,
	.dequeue = gred_dequeue,
	.peek = qdisc_peek_head,
	.destroy = gred_destroy,
	.change = gred_change,
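/* Module entry points: init registers the GRED Qdisc_ops with the packet
 * scheduler core, exit unregisters them. */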
static int __init gred_module_init(void)

static void __exit gred_module_exit(void)