#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
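
/* Network emulator (netem): imposes configurable delay, loss, duplication,
 * corruption and reordering on egress packets.  It is normally driven from
 * userspace through tc, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 * which delays packets by 100ms +/- 10ms, with 25% correlation between
 * successive delay samples (the correlated generator below).
 */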
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
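
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */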
static void init_crandom(struct crndstate *state, unsigned long rho)
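
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */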
static u32 get_crandom(struct crndstate *state)

	unsigned long answer;
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
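
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */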
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
		} else if (clg->a4 < rnd && rnd < clg->a1) {
		} else if (clg->a1 < rnd)
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
		} else if (clg->a2 + clg->a3 < rnd) {
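
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 */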
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
		return loss_4state(q);

		return loss_gilb_ell(q);
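
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */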
			struct crndstate *state,
			const struct disttable *dist)
	rnd = get_crandom(state);

	return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
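
/* tfifo_enqueue below keeps the delay queue sorted by each packet's
 * time_to_send: the common append-at-tail case is tried first, and
 * otherwise the queue is walked from the tail to find the insertion
 * point.
 */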
			return __skb_queue_tail(list, nskb);

		skb_queue_reverse_walk(list, skb) {

		__skb_queue_after(list, skb, nskb);
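
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */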
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
		if (q->ecn && INET_ECN_set_ce(skb))
		struct Qdisc *rootq = qdisc_root(sch);

		qdisc_enqueue_root(skb2, rootq);

		return qdisc_drop(skb, sch);

		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
		now = psched_get_time();

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {

		tfifo_enqueue(skb, sch);
		__skb_queue_head(&sch->q, skb);
static unsigned int netem_drop(struct Qdisc *sch)

	len = qdisc_queue_drop(sch);
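
/* In the dequeue path below, nothing is handed out while the qdisc is
 * throttled on the qdisc watchdog; otherwise the head packet is peeked
 * and, once its time_to_send has arrived, unlinked with its backlog
 * accounting updated.
 */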
	if (qdisc_is_throttled(sch))

	skb = qdisc_peek_head(sch);
			__skb_unlink(skb, &sch->q);
			sch->qstats.backlog -= qdisc_pkt_len(skb);
#ifdef CONFIG_NET_CLS_ACT

				int err = qdisc_enqueue(skb, q->qdisc);

			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
static void netem_reset(struct Qdisc *sch)

	qdisc_reset_queue(sch);
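
/* dist_free - release a distribution table with vfree() or kfree(),
 * matching whichever allocator get_dist_table() ended up using.
 */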
static void dist_free(struct disttable *d)

	if (is_vmalloc_addr(d))
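
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */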
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
	size_t n = nla_len(attr)/sizeof(__s16);

	s = sizeof(struct disttable) + n * sizeof(s16);
	for (i = 0; i < n; i++)
		d->table[i] = data[i];
	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);

	spin_unlock_bh(root_lock);
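
/* The get_*() helpers below copy a validated netlink attribute payload
 * (struct tc_netem_corr, tc_netem_reorder, tc_netem_corrupt,
 * tc_netem_rate) into the qdisc's private state.
 */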
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
			pr_info("netem: incorrect gi model size\n");

			pr_info("netem: incorrect ge model size\n");

		pr_info("netem: unknown loss type %u\n", type);
/* parse_attr below handles the backward-compatible layout in which the
 * nested attributes follow struct tc_netem_qopt inside TCA_OPTIONS;
 * nested_len is whatever remains after that fixed-size header.
 */
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);

	if (nested_len >= nla_attr_size(0))
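
/* Parse netlink message to set options */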
	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
		get_correlation(sch, tb[TCA_NETEM_CORR]);

		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);

		get_reorder(sch, tb[TCA_NETEM_REORDER]);

		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

		get_rate(sch, tb[TCA_NETEM_RATE]);
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
	ret = netem_change(sch, opt);

		pr_info("netem: change failed\n");
static void netem_destroy(struct Qdisc *sch)
		goto nla_put_failure;

		nla_nest_cancel(skb, nest);

			goto nla_put_failure;

			goto nla_put_failure;

	nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);

		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

		goto nla_put_failure;

	corrupt.probability = q->corrupt;

	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
	if (cl != 1 || !q->qdisc)	/* only one class */
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
	sch_tree_unlock(sch);
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
static unsigned long netem_get(struct Qdisc *sch, u32 classid)
static void netem_put(struct Qdisc *sch, unsigned long arg)
		if (walker->fn(sch, 1, walker) < 0) {
	.graft		=	netem_graft,

	.dump		=	netem_dump_class,

	.cl_ops		=	&netem_class_ops,

	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,

	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
static int __init netem_module_init(void)
static void __exit netem_module_exit(void)