/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 *
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};
struct hfsc_class
{
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};
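/*
 * Overview (summarizing the field comments above): each class may carry
 * up to three service curves.  The real-time curve (cl_rsc) yields the
 * eligible/deadline pair (cl_e, cl_d) used for guaranteed service, the
 * fair-share curve (cl_fsc) yields the virtual time cl_vt used for
 * link-sharing among siblings, and the upperlimit curve (cl_usc) yields
 * the fit-time cl_myf that caps how fast link-sharing may serve the class.
 */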
struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125    7.8125   0.78125   0.078125   0.0078125
 */
#define	SM_SHIFT	20
#define	ISM_SHIFT	18

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
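/*
 * Worked example (illustrative): per the table above, at 10Mbps the
 * slope is 1.28 bytes per 1.024us tick; with SM_SHIFT = 20 it is stored
 * as sm = 1.28 * 2^20 ~= 1342177, which keeps about six decimal digits
 * of the fractional slope while still leaving ample headroom in 64 bits.
 */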
static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
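/*
 * Rough sanity check (illustrative): one psched tick is 1.024us here,
 * so d2dx() maps d microseconds to roughly d / 1.024 ticks and dx2d()
 * inverts that.  m2sm(), m2ism() and d2dx() use ceiling division via
 * do_div() so that a nonzero parameter never collapses to zero, and
 * m2ism(0) yields HT_INFINITY, i.e. "no rate".
 */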
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection of the runtime service curve for the
 * given y-value (the inverse of rtsc_x2y() below).
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
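/*
 * In formula form (illustrative, ignoring the fixed-point scaling): a
 * runtime curve anchored at (x0, y0) with first-segment projections
 * (dx, dy) maps
 *
 *	y(x) = y0 + sm1 * (x - x0)		for x0 <= x <= x0 + dx
 *	y(x) = y0 + dy + sm2 * (x - x0 - dx)	for x > x0 + dx
 *
 * and rtsc_y2x() walks the same two segments with the inverse slopes.
 */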
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * which, ignoring the scaling, reads dx*sm1 == dx*sm2 + (y1 - y),
	 * i.e. dx == (y1 - y) / (sm1 - sm2)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}
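/*
 * Intuition (illustrative): cl_cumul is the real-time work already done,
 * so cl_e is the earliest time the class may again receive real-time
 * service, and cl_d is the deadline by which the next packet of next_len
 * bytes should have been sent under the real-time curve.
 */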
static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}
static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}
static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}
static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 nla_put_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}
static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	cl->bstats.packets++;
	cl->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}
static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
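/*
 * Example usage from userspace (illustrative, not part of this module;
 * exact syntax depends on the iproute2 version installed):
 *
 *	# attach hfsc as root qdisc; unclassified traffic goes to 1:10
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *
 *	# a leaf class with a link-sharing curve and an upper limit
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc rate 1mbit ul rate 10mbit
 */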