/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 *
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 *
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */
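
/*
 * For illustration: a two-segment curve with slopes m1, m2 and
 * break d is
 *
 *	y(x) = m1 * x			for x <= d
 *	y(x) = m1 * d + m2 * (x - d)	for x > d
 *
 * internally the break point is kept as the pair (dx, dy), with
 * dx = d2dx(d) and dy = seg_x2y(dx, sm1); see sc2isc() below.
 */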

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};

#define HFSC_HSIZE	16

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
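
/*
 * eltree_get_mindl() implements the real-time scheduling criterion
 * (minimum deadline among the currently eligible classes); the minimum
 * eligible time returned by eltree_get_minel() is only used to program
 * the watchdog timer in hfsc_schedule_watchdog() below.
 */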

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125    0.78125    0.078125   0.0078125
 */
#define SM_SHIFT	20
#define ISM_SHIFT	18

#define SM_MASK		((1ULL << SM_SHIFT) - 1)
#define ISM_MASK	((1ULL << ISM_SHIFT) - 1)
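
/*
 * Worked example (illustrative): at 100Kbit/s the slope is 12.8e-3
 * bytes per 1.024us tick, which is stored as sm = 12.8e-3 << SM_SHIFT,
 * roughly 13422.  About four decimal digits of the slope therefore
 * survive the fixed-point representation, matching the table above.
 */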

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
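
/*
 * The split multiply above is equivalent (up to truncation of the low
 * product) to (x * sm) >> SM_SHIFT computed at full precision: writing
 * x = ((x >> SM_SHIFT) << SM_SHIFT) + (x & SM_MASK), the high part is
 * multiplied with the shift already cancelled and only the low part
 * needs shifting, so both products stay within 64 bits.
 */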

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
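
/*
 * Derivation of the intersection offset above (illustrative): requiring
 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y) and expanding the
 * fixed-point multiply gives dx * sm1 == dx * sm2 + ((y1 - y) << SM_SHIFT),
 * hence dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2), which is exactly
 * what the do_div() computes.
 */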

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
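
/*
 * init_vf() activates a class: it walks up from the newly backlogged
 * leaf, gives each newly active ancestor a fresh virtual time, inserts
 * it into the parent's vt and cf trees, and recomputes the fit-times
 * (cl_f) along the path.
 */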
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class,vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}
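
/*
 * Note: because the peek is implemented as dequeue() followed by
 * requeue(), hfsc effectively assumes its leaf qdiscs are
 * work-conserving; a non-work-conserving inner qdisc triggers the
 * warning above.
 */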

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt    = 1;
	cl->classid   = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, tca[TCA_RATE-1]);
	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					cl->classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 rtattr_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

 rtattr_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt  = 1;
	q->root.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);