/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 *
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   The inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
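
/*
 * Editor's illustration (not in the original source): with the shifts
 * defined below, a 1 Mbit/s first segment becomes roughly
 * sm1 = m2sm(125000), i.e. bytes-per-tick scaled by 1 << SM_SHIFT, so
 * dy = seg_x2y(dx, sm1) yields the bytes served over the dx ticks of
 * the first segment using only shifts and multiplies, no 64-bit divide.
 */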
/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};
struct hfsc_class {
	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};
#define HFSC_HSIZE	16

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};
#define HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}
static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}
/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}
/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}
static void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}
static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}
static void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125    0.78125    0.078125   0.0078125
 */
#define	SM_SHIFT	20
#define	ISM_SHIFT	18

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
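
/*
 * Editor's worked example for the table above (assuming the shift
 * values here): at 100Kbps the slope is 12.8e-3 bytes per 1.024us
 * tick; stored as 12.8e-3 * 2^20 ~= 13422 it keeps 4+ significant
 * digits.  At 1Gbps the inverse slope 0.0078125 * 2^18 = 2048 does
 * the same on the ism side.
 */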
static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
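
/*
 * Editor's check of the split multiply above: with SM_SHIFT = 20,
 * x = 0x180000 and sm = 3 give
 *	(x >> 20) * 3 + (((x & SM_MASK) * 3) >> 20) = 3 + 1 = 4,
 * matching (x * sm) >> 20 evaluated at full precision, while never
 * forming the possibly overflowing full product x * sm directly.
 */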
static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}
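
/*
 * Editor's note (values illustrative only): with PSCHED_TICKS_PER_SEC
 * on the order of 10^6, m = 125000 bytes/s gives roughly
 * sm ~= (125000 << SM_SHIFT) / 10^6; the "+ PSCHED_TICKS_PER_SEC - 1"
 * term rounds the division up so a curve never serves less than the
 * configured rate due to truncation.
 */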
/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}
/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}
/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}
/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
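
/*
 * Editor's sketch of the intersection case above: with sm1 > sm2
 * (concave), the crossing offset dx is the solution of
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y),
 * i.e. dx * (sm1 - sm2) >> SM_SHIFT == y1 - y, which is why dx is
 * computed as ((y1 - y) << SM_SHIFT) / dsm via do_div() above.
 */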
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}
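
/*
 * Editor's note on init_ed(): for a convex real-time curve
 * (sm1 <= sm2) zeroing dx/dy in cl_eligible collapses the two-piece
 * deadline curve into a single line of slope m2 through
 * (cur_time, cl_cumul), which matches the convex-case eligible curve
 * described in the SIGCOMM'97 paper.
 */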
static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}
static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}
/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}
static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}
static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}
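
/*
 * Editor's worked example: classid 0x10012 hashes as
 * 0x10012 ^ 0x100 = 0x10112, then 0x10112 ^ 0x1011 = 0x11103, and
 * 0x11103 & (HFSC_HSIZE - 1) = 3, so ids differing only in the upper
 * bits still spread across the 16 buckets.
 */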
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}
static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}
static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}
static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt    = 1;
	cl->classid   = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, tca[TCA_RATE-1]);
	*arg = (unsigned long)cl;
	return 0;
}
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return NULL;
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					cl->classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}
static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}
static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}
static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}
static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}
static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}
static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}
static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 rtattr_failure:
	return -1;
}
static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

 rtattr_failure:
	return -1;
}
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt  = 1;
	q->root.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}
static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}
static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};
static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);