/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 *
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
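/*
 * Illustrative usage (not part of the original source, shown here only as
 * a sketch; device name and class numbering are assumptions): a minimal
 * HFSC setup from user space with the tc utility might look like
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc rate 500kbit ul rate 1mbit
 *
 * "sc" installs both the real-time and link-sharing service curves,
 * "ul" the optional upperlimit curve; each curve is the two-piece
 * linear function (m1, d, m2) that the code below converts to its
 * internal fixed-point representation.
 */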
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125      7.8125    0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
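/*
 * Note (added for clarity): the split multiplications above rely on the
 * exact identity
 *
 *	(x * sm) >> SM_SHIFT ==
 *		(x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT)
 *
 * so the full product x * sm never has to be formed in one step; only
 * the low SM_SHIFT bits of x are multiplied before shifting, which keeps
 * the intermediate values within 64 bits for the slopes produced by
 * m2sm()/m2ism().  seg_y2x() uses the same trick with ISM_SHIFT/ISM_MASK.
 */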
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y value (amount of service), i.e. invert the curve.
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
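/*
 * update_vf() charges len bytes of work to the link-sharing (and, when
 * present, upperlimit) curves of cl and of all its ancestors, recomputing
 * their virtual times and fit-times; once the leaf queue has drained it
 * also walks the affected classes back to the passive state.
 */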
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
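/*
 * Classify a packet to a leaf class: a valid skb->priority naming one of
 * our leaf classes wins outright, otherwise the filter chains are walked
 * from the root downwards, and finally the default class (q->defcls) is
 * tried.
 */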
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 nla_put_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	cl->bstats.packets++;
	cl->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
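/*
 * Dequeue strategy: classes that are eligible under their real-time
 * curves are served first, picking the one with the smallest deadline;
 * only when no class is eligible does the link-sharing criterion pick
 * the leaf with the smallest virtual time in the hierarchy.
 */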
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}
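/*
 * ->drop walks the active leaf classes in drop-list order and drops one
 * packet from the first leaf qdisc that can give one up, updating the
 * class and qdisc drop statistics.
 */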
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}
static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);