52 #include <linux/kernel.h>
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/errno.h>
56 #include <linux/compiler.h>
59 #include <linux/string.h>
60 #include <linux/slab.h>
61 #include <linux/list.h>
62 #include <linux/rbtree.h>
64 #include <linux/rtnetlink.h>
69 #include <asm/div64.h>
/*
 * Largest representable u64, used as an "infinite" time sentinel.
 * NOTE(review): callers are not visible in this fragment — presumably
 * compared against per-class eligible/deadline times; confirm upstream.
 */
185 #define HT_INFINITY 0xffffffffffffffffULL
208 rb_link_node(&cl->
el_node, parent, p);
234 if (p->
cl_e > cur_time)
273 rb_link_node(&cl->
vt_node, parent, p);
298 if (p->
cl_f <= cur_time)
314 while (cl->
level > 0) {
315 cl = vttree_firstfit(cl, cur_time);
342 rb_link_node(&cl->
cf_node, parent, p);
/*
 * Fixed-point shift amounts and low-bit masks for service-curve slope
 * values, scaled relative to PSCHED_SHIFT.  SM_MASK/ISM_MASK select the
 * SM_SHIFT/ISM_SHIFT fractional bits respectively.
 * NOTE(review): the exact m (bits/sec) <-> sm (bytes/psched-tick)
 * conversion these support is defined by m2sm()/m2ism(), which are not
 * visible in this fragment — confirm against the full source.
 */
384 #define SM_SHIFT (30 - PSCHED_SHIFT)
385 #define ISM_SHIFT (8 + PSCHED_SHIFT)
387 #define SM_MASK ((1ULL << SM_SHIFT) - 1)
388 #define ISM_MASK ((1ULL << ISM_SHIFT) - 1)
484 isc->
sm1 = m2sm(sc->
m1);
485 isc->
ism1 = m2ism(sc->
m1);
486 isc->
dx = d2dx(sc->
d);
487 isc->
dy = seg_x2y(isc->
dx, isc->
sm1);
488 isc->
sm2 = m2sm(sc->
m2);
489 isc->
ism2 = m2ism(sc->
m2);
520 else if (y <= rtsc->y + rtsc->
dy) {
523 x = rtsc->
x + rtsc->
dx;
525 x = rtsc->
x + seg_y2x(y - rtsc->
y, rtsc->
ism1);
528 x = rtsc->
x + rtsc->
dx
529 + seg_y2x(y - rtsc->
y - rtsc->
dy, rtsc->
ism2);
541 else if (x <= rtsc->x + rtsc->
dx)
543 y = rtsc->
y + seg_x2y(x - rtsc->
x, rtsc->
sm1);
546 y = rtsc->
y + rtsc->
dy
547 + seg_x2y(x - rtsc->
x - rtsc->
dx, rtsc->
sm2);
561 if (isc->
sm1 <= isc->
sm2) {
563 y1 = rtsc_x2y(rtsc, x);
578 y1 = rtsc_x2y(rtsc, x);
584 y2 = rtsc_x2y(rtsc, x + isc->
dx);
585 if (y2 >= y + isc->
dy) {
601 dsm = isc->
sm1 - isc->
sm2;
607 if (rtsc->
x + rtsc->
dx > x)
608 dx += rtsc->
x + rtsc->
dx -
x;
609 dy = seg_x2y(dx, isc->
sm1);
618 init_ed(
struct hfsc_class *cl,
unsigned int next_len)
620 u64 cur_time = psched_get_time();
644 update_ed(
struct hfsc_class *cl,
unsigned int next_len)
653 update_d(
struct hfsc_class *cl,
unsigned int next_len)
673 init_vf(
struct hfsc_class *cl,
unsigned int len)
743 cur_time = psched_get_time();
765 update_vf(
struct hfsc_class *cl,
unsigned int len,
u64 cur_time)
835 myf_bound = cur_time - PSCHED_JIFFIE2US(1);
836 if (cl->
cl_myf < myf_bound) {
854 set_active(
struct hfsc_class *cl,
unsigned int len)
884 skb = sch->
ops->peek(sch);
889 len = qdisc_pkt_len(skb);
897 unsigned int len = cl->
qdisc->q.qlen;
912 if (p->
level >= level)
913 level = p->
level + 1;
920 hfsc_find_class(
u32 classid,
struct Qdisc *sch)
925 clc = qdisc_class_find(&q->
clhash, classid);
969 hfsc_change_class(
struct Qdisc *sch,
u32 classid,
u32 parentid,
984 err = nla_parse_nested(tb,
TCA_HFSC_MAX, opt, hfsc_policy);
989 rsc = nla_data(tb[TCA_HFSC_RSC]);
990 if (rsc->
m1 == 0 && rsc->
m2 == 0)
995 fsc = nla_data(tb[TCA_HFSC_FSC]);
996 if (fsc->m1 == 0 && fsc->m2 == 0)
1001 usc = nla_data(tb[TCA_HFSC_USC]);
1002 if (usc->m1 == 0 && usc->m2 == 0)
1009 cl->
cl_parent->cl_common.classid != parentid)
1014 cur_time = psched_get_time();
1018 qdisc_root_sleeping_lock(sch),
1026 hfsc_change_rsc(cl, rsc, cur_time);
1028 hfsc_change_fsc(cl, fsc);
1030 hfsc_change_usc(cl, usc, cur_time);
1032 if (cl->
qdisc->q.qlen != 0) {
1034 update_ed(cl, qdisc_peek_len(cl->
qdisc));
1036 update_vf(cl, 0, cur_time);
1038 sch_tree_unlock(sch);
1048 parent = hfsc_find_class(parentid, sch);
1055 if (hfsc_find_class(classid, sch))
1065 if (tca[TCA_RATE]) {
1067 qdisc_root_sleeping_lock(sch),
1076 hfsc_change_rsc(cl, rsc, 0);
1078 hfsc_change_fsc(cl, fsc);
1080 hfsc_change_usc(cl, usc, 0);
1087 &pfifo_qdisc_ops, classid);
1097 if (parent->
level == 0)
1098 hfsc_purge_queue(sch, parent);
1099 hfsc_adjust_levels(parent);
1101 sch_tree_unlock(sch);
1105 *arg = (
unsigned long)cl;
1122 hfsc_delete_class(
struct Qdisc *sch,
unsigned long arg)
1135 hfsc_purge_queue(sch, cl);
1144 sch_tree_unlock(sch);
1149 hfsc_classify(
struct sk_buff *skb,
struct Qdisc *sch,
int *qerr)
1164 tcf = q->
root.filter_list;
1166 #ifdef CONFIG_NET_CLS_ACT
1177 cl = hfsc_find_class(
res.classid, sch);
1201 hfsc_graft_class(
struct Qdisc *sch,
unsigned long arg,
struct Qdisc *
new,
1216 hfsc_purge_queue(sch, cl);
1219 sch_tree_unlock(sch);
1223 static struct Qdisc *
1224 hfsc_class_leaf(
struct Qdisc *sch,
unsigned long arg)
1235 hfsc_qlen_notify(
struct Qdisc *sch,
unsigned long arg)
1239 if (cl->
qdisc->q.qlen == 0) {
1240 update_vf(cl, 0, 0);
1245 static unsigned long
1246 hfsc_get_class(
struct Qdisc *sch,
u32 classid)
1248 struct hfsc_class *cl = hfsc_find_class(classid, sch);
1253 return (
unsigned long)
cl;
1257 hfsc_put_class(
struct Qdisc *sch,
unsigned long arg)
1262 hfsc_destroy_class(sch, cl);
1265 static unsigned long
1266 hfsc_bind_tcf(
struct Qdisc *sch,
unsigned long parent,
u32 classid)
1269 struct hfsc_class *cl = hfsc_find_class(classid, sch);
1277 return (
unsigned long)
cl;
1281 hfsc_unbind_tcf(
struct Qdisc *sch,
unsigned long arg)
1289 hfsc_tcf_chain(
struct Qdisc *sch,
unsigned long arg)
1306 tsc.d = dx2d(sc->
dx);
1309 goto nla_put_failure;
1321 (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->
cl_rsc) < 0))
1322 goto nla_put_failure;
1325 (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->
cl_fsc) < 0))
1326 goto nla_put_failure;
1329 (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->
cl_usc) < 0))
1330 goto nla_put_failure;
1339 hfsc_dump_class(
struct Qdisc *sch,
unsigned long arg,
struct sk_buff *skb,
1353 goto nla_put_failure;
1354 if (hfsc_dump_curves(skb, cl) < 0)
1355 goto nla_put_failure;
1356 nla_nest_end(skb, nest);
1360 nla_nest_cancel(skb, nest);
1365 hfsc_dump_class_stats(
struct Qdisc *sch,
unsigned long arg,
1373 xstats.level = cl->
level;
1399 for (i = 0; i < q->
clhash.hashsize; i++) {
1406 if (arg->
fn(sch, (
unsigned long)cl, arg) < 0) {
1416 hfsc_schedule_watchdog(
struct Qdisc *sch)
1422 cl = eltree_get_minel(q);
1424 next_time = cl->
cl_e;
1425 if (q->
root.cl_cfmin != 0) {
1426 if (next_time == 0 || next_time > q->
root.cl_cfmin)
1427 next_time = q->
root.cl_cfmin;
1434 hfsc_init_qdisc(
struct Qdisc *sch,
struct nlattr *opt)
1440 if (opt ==
NULL || nla_len(opt) <
sizeof(*qopt))
1442 qopt = nla_data(opt);
1458 INIT_LIST_HEAD(&q->
root.children);
1471 hfsc_change_qdisc(
struct Qdisc *sch,
struct nlattr *opt)
1476 if (opt ==
NULL || nla_len(opt) <
sizeof(*qopt))
1478 qopt = nla_data(opt);
1482 sch_tree_unlock(sch);
1522 hfsc_reset_qdisc(
struct Qdisc *sch)
1529 for (i = 0; i < q->
clhash.hashsize; i++) {
1531 hfsc_reset_class(cl);
1534 INIT_LIST_HEAD(&q->droplist);
1547 for (i = 0; i < q->
clhash.hashsize; i++) {
1551 for (i = 0; i < q->clhash.hashsize; i++) {
1554 hfsc_destroy_class(sch, cl);
1564 unsigned char *
b = skb_tail_pointer(skb);
1570 sch->qstats.backlog = 0;
1571 for (i = 0; i < q->
clhash.hashsize; i++) {
1576 qopt.defcls = q->defcls;
1578 goto nla_put_failure;
1592 cl = hfsc_classify(skb, sch, &err);
1595 sch->qstats.drops++;
1600 err = qdisc_enqueue(skb, cl->
qdisc);
1604 sch->qstats.drops++;
1609 if (cl->
qdisc->q.qlen == 1)
1610 set_active(cl, qdisc_pkt_len(skb));
1618 hfsc_dequeue(
struct Qdisc *sch)
1624 unsigned int next_len;
1627 if (sch->
q.qlen == 0)
1630 cur_time = psched_get_time();
1637 cl = eltree_get_mindl(q, cur_time);
1645 cl = vttree_get_minvt(&q->
root, cur_time);
1647 sch->
qstats.overlimits++;
1648 hfsc_schedule_watchdog(sch);
1653 skb = qdisc_dequeue_peeked(cl->
qdisc);
1659 bstats_update(&cl->
bstats, skb);
1660 update_vf(cl, qdisc_pkt_len(skb), cur_time);
1662 cl->
cl_cumul += qdisc_pkt_len(skb);
1664 if (cl->
qdisc->q.qlen != 0) {
1667 next_len = qdisc_peek_len(cl->
qdisc);
1669 update_ed(cl, next_len);
1671 update_d(cl, next_len);
1678 qdisc_unthrottled(sch);
1679 qdisc_bstats_update(sch, skb);
1686 hfsc_drop(
struct Qdisc *sch)
1694 (len = cl->
qdisc->ops->drop(cl->
qdisc)) > 0) {
1695 if (cl->
qdisc->q.qlen == 0) {
1696 update_vf(cl, 0, 0);
1711 .change = hfsc_change_class,
1712 .delete = hfsc_delete_class,
1713 .graft = hfsc_graft_class,
1714 .leaf = hfsc_class_leaf,
1715 .qlen_notify = hfsc_qlen_notify,
1716 .get = hfsc_get_class,
1717 .put = hfsc_put_class,
1718 .bind_tcf = hfsc_bind_tcf,
1719 .unbind_tcf = hfsc_unbind_tcf,
1720 .tcf_chain = hfsc_tcf_chain,
1721 .dump = hfsc_dump_class,
1722 .dump_stats = hfsc_dump_class_stats,
1728 .init = hfsc_init_qdisc,
1729 .change = hfsc_change_qdisc,
1730 .reset = hfsc_reset_qdisc,
1731 .destroy = hfsc_destroy_qdisc,
1732 .dump = hfsc_dump_qdisc,
1733 .enqueue = hfsc_enqueue,
1734 .dequeue = hfsc_dequeue,
1735 .peek = qdisc_peek_dequeued,
1737 .cl_ops = &hfsc_class_ops,