sch_hfsc.c (Linux Kernel 3.7.1)

/*
 * Copyright (c) 2003 Patrick McHardy, <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <[email protected]> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
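
/*
 * Illustrative userspace configuration (a sketch using common iproute2
 * "tc" syntax; not part of this file): create an HFSC root qdisc and
 * one leaf class whose service curve starts at 2 Mbit/s for 10 ms and
 * then drops to 1 Mbit/s, with a 2 Mbit/s upper limit on link-sharing:
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc m1 2mbit d 10ms m2 1mbit ul rate 2mbit
 */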

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
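
/*
 * Illustration (not from the original comments): an internal_sc is the
 * static per-class description of a two-segment curve; e.g. "2 Mbit/s
 * for 10 ms, then 1 Mbit/s" becomes (sm1, ism1, dx, dy, sm2, ism2)
 * with dy = seg_x2y(dx, sm1). A runtime_sc is the same shape
 * re-anchored at a point (x, y) each time the class becomes active,
 * so the scheduler can ask "how much service by time t" relative to
 * the current backlog period.
 */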

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec       100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us  12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte   78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

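/*
 * Worked example (illustrative): with PSCHED_SHIFT 10, SM_SHIFT is 20
 * and PSCHED_TICKS_PER_SEC is about 976562 (1e9 ns >> 10). For a
 * 1 Mbit/s segment, m is 125000 bytes/sec, so m2sm() below yields
 *
 *	sm = (125000 << 20) / 976562 ~= 134218
 *
 * i.e. 134218 >> 20 ~= 0.128 bytes per 1.024us tick, matching the
 * "128e-3" column in the table above while keeping about six
 * significant digits in the scaled integer.
 */
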
static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

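/*
 * The split above works because x = ((x >> SM_SHIFT) << SM_SHIFT)
 * + (x & SM_MASK), so
 *
 *	(x * sm) >> SM_SHIFT
 *		= (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT)
 *
 * and each partial product stays well below 2^64 for realistic rates,
 * at the cost of dropping the fractional bits of the low half
 * (an illustrative derivation, not part of the original comments).
 */
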
static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

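/*
 * Example (illustrative): d2dx(10000) for a 10 ms segment with
 * PSCHED_SHIFT 10 computes (10000 * 976562 + 999999) / 1000000, which
 * is 9766 psched ticks, i.e. 10 ms rounded up in 1.024us units.
 */
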
/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y-value (amount of work)
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

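/*
 * Worked example (illustrative): take the "2 Mbit/s for 10 ms, then
 * 1 Mbit/s" curve anchored at (x, y) = (0, 0), so dx ~= 9766 ticks and
 * dy = 2500 bytes (250000 bytes/sec * 10 ms). Then
 *
 *	rtsc_x2y(rtsc, 4883)  ~= 1250 bytes	(middle of the 1st segment)
 *	rtsc_x2y(rtsc, 19532) ~= 2500 + 1250	(10 ms into the 2nd segment)
 *
 * and rtsc_y2x() inverts the mapping, returning the tick by which a
 * given number of bytes has been served.
 */
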
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * which solves to
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

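/*
 * Numeric illustration (not from the original comments): if a parent's
 * cl_cvtmin is 100 and its right-most child has cl_vt = 200, a newly
 * activated sibling starts at vt = (100 + 200) / 2 = 150, so it gets
 * neither an unfair head start nor a hopeless lag relative to the
 * children that are already active.
 */
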
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

1109 static void
1110 hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
1111 {
1112  struct hfsc_sched *q = qdisc_priv(sch);
1113 
1115  qdisc_destroy(cl->qdisc);
1116  gen_kill_estimator(&cl->bstats, &cl->rate_est);
1117  if (cl != &q->root)
1118  kfree(cl);
1119 }
1120 
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

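/*
 * Illustrative filter setup (a sketch using common iproute2 syntax;
 * not part of this file): packets reach a leaf class either via
 * skb->priority carrying a matching classid or via an attached
 * classifier, e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:10
 *
 * Traffic that matches no filter falls back to the default class.
 */
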
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	sch->qstats.backlog = 0;
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			sch->qstats.backlog += cl->qdisc->qstats.backlog;
	}

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_unthrottled(sch);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);