Linux 2.6.26: net/sched/sch_htb.c
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *		 HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *		 found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *		 helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *		 code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *		 created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *		 spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *		 fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
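/*
 * Editor's illustrative sketch (not part of the original source): one way
 * the level rule plays out with the stock tc tool. Device names, handles
 * and rates below are made-up examples, not values this file defines.
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 10mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 4mbit ceil 10mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 6mbit ceil 10mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0) and 1:1 is their interior parent
 * (level 1). Leaves borrow from 1:1 up to their ceil when it has spare rate.
 */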
#define HTB_HSIZE 16		/* classid hash size */
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
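/*
 * Editor's note (an assumption from standard module_param behaviour, not
 * stated in this file): with mode 0640 the knob should be adjustable by
 * root at runtime, e.g.
 *
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 *
 * or set at load time with "modprobe sch_htb htb_hysteresis=1".
 */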
/* used internally to keep status of single class */
enum htb_cmode {
        HTB_CANT_SEND,          /* class can't send and can't borrow */
        HTB_MAY_BORROW,         /* class can't send but may borrow */
        HTB_CAN_SEND            /* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
        /* general class parameters */
        u32 classid;
        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct tc_htb_xstats xstats;    /* our special stats */
        int refcnt;             /* usage count of this class */

        /* topology */
        int level;              /* our level (see above) */
        struct htb_class *parent;       /* parent class */
        struct hlist_node hlist;        /* classid hash list item */
        struct list_head sibling;       /* sibling list item */
        struct list_head children;      /* children list */

        union {
                struct htb_class_leaf {
                        struct Qdisc *q;
                        int prio;
                        int aprio;
                        int quantum;
                        int deficit[TC_HTB_MAXDEPTH];
                        struct list_head drop_list;
                } leaf;
                struct htb_class_inner {
                        struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                        struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                        /* When a class changes from state 1->2 and disconnects
                           from the parent's feed, we lose the ptr value and
                           start from the first child again. Here we store the
                           classid of the last valid ptr (used when ptr is
                           NULL). */
                        u32 last_ptr_id[TC_HTB_NUMPRIO];
                } inner;
        } un;
        struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
        struct rb_node pq_node; /* node for event queue */
        psched_time_t pq_key;

        int prio_activity;      /* for which prios are we active */
        enum htb_cmode cmode;   /* current mode of the class */

        /* class attached filters */
        struct tcf_proto *filter_list;
        int filter_cnt;

        int warned;             /* only one warning about a non-work-conserving leaf */

        /* token bucket parameters */
        struct qdisc_rate_table *rate;  /* rate table of the class itself */
        struct qdisc_rate_table *ceil;  /* ceiling rate (limits borrows too) */
        long buffer, cbuffer;   /* token bucket depth/rate */
        psched_tdiff_t mbuffer; /* max wait time */
        long tokens, ctokens;   /* current number of tokens */
        psched_time_t t_c;      /* checkpoint time */

        int prio;               /* backup of un.leaf.prio for parent-to-leaf return */
        int quantum;            /* backup of un.leaf.quantum; eventually the
                                   un.leaf originals should be fully replaced */
};
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
                       int size)
{
        long result = qdisc_l2t(rate, size);
        return result;
}
struct htb_sched {
        struct list_head root;  /* root classes list */
        struct hlist_head hash[HTB_HSIZE];      /* hashed by classid */
        struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */

        /* self list - roots of self generating tree */
        struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        int row_mask[TC_HTB_MAXDEPTH];
        struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

        /* self wait list - roots of wait PQs per row */
        struct rb_root wait_pq[TC_HTB_MAXDEPTH];

        /* time of nearest event per level (row) */
        psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

        /* whether we hit a non-work-conserving class during this dequeue;
           we use this to suppress the mindelay complaint in dequeue */
        int nwc_hit;

        int defcls;             /* class where unclassified flows go to */

        /* filters for qdisc itself */
        struct tcf_proto *filter_list;
        int filter_cnt;

        int rate2quantum;       /* quant = rate / rate2quantum */
        psched_time_t now;      /* cached dequeue time */
        struct qdisc_watchdog watchdog;

        /* non-shaped skbs; let them pass through directly */
        struct sk_buff_head direct_queue;
        int direct_qlen;        /* max qlen of above */

        long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
        h ^= h >> 8;            /* stolen from cbq_hash */
        h ^= h >> 4;
        return h & 0xf;
}
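/*
 * Editor's worked example (illustrative, not in the original source): for
 * classid 1:10 the full handle is 0x00010010, so
 *
 *   h            = 0x00010010
 *   h ^= h >> 8 -> 0x00010010 ^ 0x00000100 = 0x00010110
 *   h ^= h >> 4 -> 0x00010110 ^ 0x00001011 = 0x00011101
 *   h & 0xf     -> 0x1
 *
 * i.e. that class lands in hash bucket 1.
 */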
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct hlist_node *p;
        struct htb_class *cl;

        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly through. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in priority. Then we
 * examine filters in qdisc and in inner nodes (if a higher filter points to
 * the inner node). If we end up with classid MAJOR:0 we enqueue the skb into
 * a special internal fifo (direct). These packets then go directly through.
 * If we still have no valid leaf we try to use the MAJOR:default leaf. If
 * still unsuccessful, we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow selecting a class by setting skb->priority to a valid classid;
           note that nfmark can be used too by attaching filter fw with no
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                if ((cl = (void *)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid, sch)) == NULL)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl;      /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (!cl || cl->level)
                return HTB_DIRECT;      /* bad default .. this is a safe bet */
        return cl;
}
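/*
 * Editor's illustrative sketch (not in the original source): the two
 * classification paths above correspond to userspace doing either
 *
 *   setsockopt(fd, SOL_SOCKET, SO_PRIORITY, ...) with a full classid
 *     (e.g. 0x00010010 for 1:10, CAP_NET_ADMIN required for such values),
 *     which short-circuits the filter walk, or
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *       match ip dport 80 0xffff flowid 1:10
 *
 * which is resolved by the tc_classify() loop. Device and match values are
 * made-up examples.
 */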
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
                               struct htb_class *cl, int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);

                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, long delay)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
                cl->pq_key++;

        /* update the nearest event cache */
        if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                                        struct htb_class *cl, int mask)
{
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
        }
}
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
        if (RB_EMPTY_NODE(rb)) {
                WARN_ON(1);
        } else {
                rb_erase(rb, root);
                RB_CLEAR_NODE(rb);
        }
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
                                             struct htb_class *cl, int mask)
{
        int m = 0;

        while (mask) {
                int prio = ffz(~mask);

                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node + prio)
                        htb_next_rb_node(q->ptr[cl->level] + prio);

                htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates class's active feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it participates in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use;
                                   reset the bit in mask as the parent is
                                   already ok */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
                p = cl->parent;
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node + prio) {
                                /* we are removing a child which is pointed
                                   to from the parent feed - forget the
                                   pointer but remember the classid */
                                p->un.inner.last_ptr_id[prio] = cl->classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }
                p->prio_activity &= ~mask;
                cl = p;
                p = cl->parent;
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q, cl, mask);
}
static inline long htb_lowater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
        else
                return 0;
}

static inline long htb_hiwater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
        else
                return 0;
}
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0; there can rather be hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
        long toks;

        if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                *diff = -toks;
                return HTB_CANT_SEND;
        }

        if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                return HTB_CAN_SEND;

        *diff = -toks;
        return HTB_MAY_BORROW;
}
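/*
 * Editor's worked example (illustrative values, not from the original
 * source), with hysteresis off so both watermarks are 0:
 *
 *   ctokens + diff = -500                        -> HTB_CANT_SEND and
 *                                                   *diff becomes 500
 *                                                   (time until ctokens
 *                                                   reaches 0 again);
 *   ctokens + diff =  200, tokens + diff =  50   -> HTB_CAN_SEND;
 *   ctokens + diff =  200, tokens + diff = -50   -> HTB_MAY_BORROW,
 *                                                   *diff becomes 50.
 *
 * With htb_hysteresis the thresholds shift by -cbuffer/-buffer, so a class
 * keeps its current mode until it drains a whole burst, trading accuracy
 * for fewer mode transitions.
 */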
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND
 * (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl, diff);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) {        /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q, cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q, cl);
        } else
                cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can safely be called on an already active leaf.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q, cl);
                list_add_tail(&cl->un.leaf.drop_list,
                              q->drops + cl->un.leaf.aprio);
        }
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);

        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        int ret;
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        __skb_queue_tail(&q->direct_queue, skb);
                        q->direct_pkts++;
                } else {
                        kfree_skb(skb);
                        sch->qstats.drops++;
                        return NET_XMIT_DROP;
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
#endif
        } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
                   NET_XMIT_SUCCESS) {
                sch->qstats.drops++;
                cl->qstats.drops++;
                return NET_XMIT_DROP;
        } else {
                cl->bstats.packets +=
                        skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
                cl->bstats.bytes += skb->len;
                htb_activate(q, cl);
        }

        sch->q.qlen++;
        sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        sch->bstats.bytes += skb->len;
        return NET_XMIT_SUCCESS;
}
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        int ret;
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);
        struct sk_buff *tskb;

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        __skb_queue_head(&q->direct_queue, skb);
                } else {
                        __skb_queue_head(&q->direct_queue, skb);
                        tskb = __skb_dequeue_tail(&q->direct_queue);
                        kfree_skb(tskb);
                        sch->qstats.drops++;
                        return NET_XMIT_CN;
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
#endif
        } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
                   NET_XMIT_SUCCESS) {
                sch->qstats.drops++;
                cl->qstats.drops++;
                return NET_XMIT_DROP;
        } else
                htb_activate(q, cl);

        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
{
        int bytes = skb->len;
        long toks, diff;
        enum htb_cmode old_mode;

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1 - cl->mbuffer; \
        cl->T = toks
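/* Editor's note (illustrative expansion, not in the original source):
 * HTB_ACCNT(tokens, buffer, rate) expands to the classic token bucket
 * update, clamped on both sides:
 *
 *   toks = diff + cl->tokens;            refill for elapsed time diff
 *   if (toks > cl->buffer)
 *           toks = cl->buffer;           cap at the configured burst
 *   toks -= L2T(cl, cl->rate, bytes);    pay for this packet
 *   if (toks <= -cl->mbuffer)
 *           toks = 1 - cl->mbuffer;      bound the debt by mbuffer
 *   cl->tokens = toks;
 */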
        while (cl) {
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
                        HTB_ACCNT(tokens, buffer, rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff;     /* we moved t_c; update tokens */
                }
                HTB_ACCNT(ctokens, cbuffer, ceil);
                cl->t_c = q->now;

                old_mode = cl->cmode;
                diff = 0;
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->bstats.bytes += bytes;
                        cl->bstats.packets += skb_is_gso(skb) ?
                                        skb_shinfo(skb)->gso_segs : 1;
                }
                cl = cl->parent;
        }
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: only events with cl->pq_key <= q->now are applied.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
        /* don't run for longer than 2 jiffies; 2 is used instead of
           1 to simplify things when jiffy is going to be incremented
           too soon */
        unsigned long stop_at = jiffies + 2;
        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = rb_first(&q->wait_pq[level]);

                if (!p)
                        return 0;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (cl->pq_key > q->now)
                        return cl->pq_key;

                htb_safe_rb_erase(p, q->wait_pq + level);
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }
        /* too much load - let's continue on next jiffie */
        return q->now + PSCHED_TICKS_PER_SEC / HZ;
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl =
                    rb_entry(n, struct htb_class, node[prio]);
                if (id == cl->classid)
                        return n;

                if (id > cl->classid) {
                        n = n->rb_right;
                } else {
                        r = n;
                        n = n->rb_left;
                }
        }
        return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                                         struct rb_node **pptr, u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;

        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                           the original or next ptr */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now; remove this hint as it
                                   can become stale quickly */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr);
                                if (!*sp->pptr)
                                        return NULL;
                                htb_next_rb_node(sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr + prio;
                        sp->pid = cl->un.inner.last_ptr_id + prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}
/* dequeues packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                        int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl)
                        return NULL;

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue;
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf(q->row[level] + prio,
                                               prio, q->ptr[level] + prio,
                                               q->last_ptr_id[level] + prio);

                        if (cl == start)        /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
                if (likely(skb != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING
                               "htb: class %X isn't work conserving ?!\n",
                               cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
                cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->ptr[0]) + prio);
                }
                /* this used to be after charge_class but this arrangement
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
        }
        return skb;
}
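/*
 * Editor's worked example of the deficit round robin step above
 * (illustrative numbers, not from the original source): with quantum
 * 1500 and deficit[level] = 200, dequeuing a 600-byte skb gives
 * 200 - 600 = -400 < 0, so the deficit becomes -400 + 1500 = 1100 and
 * the feed pointer advances to the next class; a subsequent 900-byte
 * skb leaves 1100 - 900 = 200 >= 0, so the same class keeps the turn.
 */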
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        psched_time_t next_event;

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __skb_dequeue(&q->direct_queue);
        if (skb != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen)
                goto fin;
        q->now = psched_get_time();

        next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                psched_time_t event;

                if (q->now >= q->near_ev_cache[level]) {
                        event = htb_do_events(q, level);
                        if (!event)
                                event = q->now + PSCHED_TICKS_PER_SEC;
                        q->near_ev_cache[level] = event;
                } else
                        event = q->near_ev_cache[level];

                if (event && next_event > event)
                        next_event = event;

                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
        sch->qstats.overlimits++;
        qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
        return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each(p, q->drops + prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                            (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate(q, cl);
                                return len;
                        }
                }
        }
        return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;

        for (i = 0; i < HTB_HSIZE; i++) {
                struct hlist_node *p;
                struct htb_class *cl;

                hlist_for_each_entry(cl, p, q->hash + i, hlist) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
                }
        }
        qdisc_watchdog_cancel(&q->watchdog);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row, 0, sizeof(q->row));
        memset(q->row_mask, 0, sizeof(q->row_mask));
        memset(q->wait_pq, 0, sizeof(q->wait_pq));
        memset(q->ptr, 0, sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
        [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
        [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
        [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
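/*
 * Editor's note (illustrative, inferred from how nla_parse_nested() is
 * used below rather than stated in this file): userspace nests these
 * attributes inside TCA_OPTIONS; a qdisc create carries TCA_HTB_INIT with
 * a struct tc_htb_glob payload, while a class create carries
 * TCA_HTB_PARMS plus the TCA_HTB_RTAB/TCA_HTB_CTAB rate tables.
 */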
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HTB_INIT + 1];
        struct tc_htb_glob *gopt;
        int err;
        int i;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
        if (err < 0)
                return err;

        if (tb[TCA_HTB_INIT] == NULL) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = nla_data(tb[TCA_HTB_INIT]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR
                       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                return -EINVAL;
        }

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_HLIST_HEAD(q->hash + i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);

        qdisc_watchdog_init(&q->watchdog, sch);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;

        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}
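/*
 * Editor's illustrative sketch (not in the original source): the gopt
 * fields consumed above map onto tc's qdisc options, e.g.
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10
 *
 * sets q->rate2quantum = 10 and q->defcls to the minor classid that
 * unclassified traffic falls back to in htb_classify() (tc parses
 * "default" as hex, so 0x20 here). Device and numbers are made-up
 * examples.
 */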
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_htb_glob gopt;

        spin_lock_bh(&sch->dev->queue_lock);

        gopt.direct_pkts = q->direct_pkts;
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = 0;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        nla_nest_end(skb, nest);

        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
        nla_nest_cancel(skb, nest);
        return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct htb_class *cl = (struct htb_class *)arg;
        struct nlattr *nest;
        struct tc_htb_opt opt;

        spin_lock_bh(&sch->dev->queue_lock);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        memset(&opt, 0, sizeof(opt));

        opt.rate = cl->rate->rate;
        opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate;
        opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum;
        opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

        nla_nest_end(skb, nest);
        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
        nla_nest_cancel(skb, nest);
        return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl && !cl->level) {
                if (new == NULL &&
                    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                             cl->classid))
                    == NULL)
                        return -ENOBUFS;
                sch_tree_lock(sch);
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
                        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
                        qdisc_reset(*old);
                }
                sch_tree_unlock(sch);
                return 0;
        }
        return -ENOENT;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;
        return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl->un.leaf.q->q.qlen == 0)
                htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
        struct htb_class *cl = htb_find(classid, sch);
        if (cl)
                cl->refcnt++;
        return (unsigned long)cl;
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
        if (!cl->parent)
                /* the root class */
                return 0;

        if (!(cl->parent->children.next == &cl->sibling &&
              cl->parent->children.prev == &cl->sibling))
                /* not the last child */
                return 0;

        return 1;
}
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
                               struct Qdisc *new_q)
{
        struct htb_class *parent = cl->parent;

        BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

        if (parent->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

        parent->level = 0;
        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
        INIT_LIST_HEAD(&parent->un.leaf.drop_list);
        parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
        parent->un.leaf.quantum = parent->quantum;
        parent->un.leaf.prio = parent->prio;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
        parent->t_c = psched_get_time();
        parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
        struct htb_sched *q = qdisc_priv(sch);

        if (!cl->level) {
                BUG_TRAP(cl->un.leaf.q);
                qdisc_destroy(cl->un.leaf.q);
        }
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_put_rtab(cl->rate);
        qdisc_put_rtab(cl->ceil);

        tcf_destroy_chain(&cl->filter_list);

        while (!list_empty(&cl->children))
                htb_destroy_class(sch, list_entry(cl->children.next,
                                                  struct htb_class, sibling));

        /* note: this delete may happen twice (see htb_delete) */
        hlist_del_init(&cl->hlist);
        list_del(&cl->sibling);

        if (cl->prio_activity)
                htb_deactivate(q, cl);

        if (cl->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

        kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
        /* This line used to be after the htb_destroy_class call below
           and surprisingly it worked in 2.4. But it must precede it
           because filters need their target class alive to be able to call
           unbind_filter on it (without an Oops). */
        tcf_destroy_chain(&q->filter_list);

        while (!list_empty(&q->root))
                htb_destroy_class(sch, list_entry(q->root.next,
                                                  struct htb_class, sibling));

        __skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
        unsigned int qlen;
        struct Qdisc *new_q = NULL;
        int last_child = 0;

        // TODO: why don't we allow deleting a subtree? references? does the
        // tc subsystem guarantee us that in htb_destroy it holds no class
        // refs, so that we can remove children safely there?
        if (!list_empty(&cl->children) || cl->filter_cnt)
                return -EBUSY;

        if (!cl->level && htb_parent_last_child(cl)) {
                new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                          cl->parent->classid);
                last_child = 1;
        }

        sch_tree_lock(sch);

        if (!cl->level) {
                qlen = cl->un.leaf.q->q.qlen;
                qdisc_reset(cl->un.leaf.q);
                qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
        }

        /* delete from hash and active; remainder in destroy_class */
        hlist_del_init(&cl->hlist);

        if (cl->prio_activity)
                htb_deactivate(q, cl);

        if (last_child)
                htb_parent_to_leaf(q, cl, new_q);

        if (--cl->refcnt == 0)
                htb_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (--cl->refcnt == 0)
                htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct nlattr **tca,
                            unsigned long *arg)
{
        int err = -EINVAL;
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)*arg, *parent;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[TCA_HTB_RTAB + 1];
        struct tc_htb_opt *hopt;

        /* extract all subattrs from opt attr */
        if (!opt)
                goto failure;

        err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
        if (err < 0)
                goto failure;

        err = -EINVAL;
        if (tb[TCA_HTB_PARMS] == NULL)
                goto failure;

        parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

        hopt = nla_data(tb[TCA_HTB_PARMS]);

        rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
        ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
        if (!rtab || !ctab)
                goto failure;
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
                int prio;
                struct {
                        struct nlattr nla;
                        struct gnet_estimator opt;
                } est = {
                        .nla = {
                                .nla_len = nla_attr_size(sizeof(est.opt)),
                                .nla_type = TCA_RATE,
                        },
                        .opt = {
                                /* 4s interval, 16s averaging constant */
                                .interval = 2,
                                .ewma_log = 2,
                        },
                };

                /* check for valid classid */
                if (!classid || TC_H_MAJ(classid ^ sch->handle)
                    || htb_find(classid, sch))
                        goto failure;

                /* check maximal depth */
                if (parent && parent->parent && parent->parent->level < 2) {
                        printk(KERN_ERR "htb: tree is too deep\n");
                        goto failure;
                }
                err = -ENOBUFS;
                if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
                        goto failure;

                gen_new_estimator(&cl->bstats, &cl->rate_est,
                                  &sch->dev->queue_lock,
                                  tca[TCA_RATE] ? : &est.nla);
                cl->refcnt = 1;
                INIT_LIST_HEAD(&cl->sibling);
                INIT_HLIST_NODE(&cl->hlist);
                INIT_LIST_HEAD(&cl->children);
                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                RB_CLEAR_NODE(&cl->pq_node);

                for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
                        RB_CLEAR_NODE(&cl->node[prio]);
                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
                   so it can't be used inside of sch_tree_lock
                   -- thanks to Karlis Peisenieks */
                new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;

                        /* turn parent into inner node */
                        qdisc_reset(parent->un.leaf.q);
                        qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
                        qdisc_destroy(parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);

                        /* remove from evt list because of level change */
                        if (parent->cmode != HTB_CAN_SEND) {
                                htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
                                parent->cmode = HTB_CAN_SEND;
                        }
                        parent->level = (parent->parent ? parent->parent->level
                                         : TC_HTB_MAXDEPTH) - 1;
                        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
                }
                /* leaf (we) needs elementary qdisc */
                cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

                cl->classid = classid;
                cl->parent = parent;

                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = hopt->buffer;
                cl->ctokens = hopt->cbuffer;
                cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
                cl->t_c = psched_get_time();
                cl->cmode = HTB_CAN_SEND;

                /* attach to the hash list and parent's family */
                hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
                list_add_tail(&cl->sibling,
                              parent ? &parent->children : &q->root);
        } else {
                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              &sch->dev->queue_lock,
                                              tca[TCA_RATE]);
                sch_tree_lock(sch);
        }
        /* there used to be a nasty bug here: we had to check that the node
           is really a leaf before changing cl->un.leaf! */
        if (!cl->level) {
                cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
                        printk(KERN_WARNING
                               "HTB: quantum of class %X is small. Consider r2q change.\n",
                               cl->classid);
                        cl->un.leaf.quantum = 1000;
                }
                if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
                        printk(KERN_WARNING
                               "HTB: quantum of class %X is big. Consider r2q change.\n",
                               cl->classid);
                        cl->un.leaf.quantum = 200000;
                }
                if (hopt->quantum)
                        cl->un.leaf.quantum = hopt->quantum;
                if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
                        cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

                /* backup for htb_parent_to_leaf */
                cl->quantum = cl->un.leaf.quantum;
                cl->prio = cl->un.leaf.prio;
        }

        cl->buffer = hopt->buffer;
        cl->cbuffer = hopt->cbuffer;
        if (cl->rate)
                qdisc_put_rtab(cl->rate);
        cl->rate = rtab;
        if (cl->ceil)
                qdisc_put_rtab(cl->ceil);
        cl->ceil = ctab;
        sch_tree_unlock(sch);

        *arg = (unsigned long)cl;
        return 0;
failure:
        if (rtab)
                qdisc_put_rtab(rtab);
        if (ctab)
                qdisc_put_rtab(ctab);
        return err;
}
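/*
 * Editor's illustrative sketch (not in the original source): the quantum
 * logic above means that with r2q left at tc's default of 10,
 *
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 800kbit
 *
 * yields quantum = 100000 / 10 = 10000 bytes (rtab->rate.rate is in
 * bytes/s), while very low or very high rates trigger the "Consider r2q
 * change" warnings and get clamped to 1000 or 200000 unless an explicit
 * "quantum" parameter is given. Device and rate are made-up examples.
 */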
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
        struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

        return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_find(classid, sch);

        /*if (cl && !cl->level) return 0;
           The line above used to be there to prevent attaching filters to
           leaves. But at least the tc_index filter uses this just to get the
           class for other reasons, so we have to allow for it.
           ----
           19.6.2002 As Werner explained it is ok - bind filter is just
           another way to "lock" the class - unlike "get" this lock can
           be broken by class during destroy IIUC.
         */
        if (cl)
                cl->filter_cnt++;
        else
                q->filter_cnt++;
        return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl)
                cl->filter_cnt--;
        else
                q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;

        if (arg->stop)
                return;

        for (i = 0; i < HTB_HSIZE; i++) {
                struct hlist_node *p;
                struct htb_class *cl;

                hlist_for_each_entry(cl, p, q->hash + i, hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}
static const struct Qdisc_class_ops htb_class_ops = {
        .graft = htb_graft,
        .leaf = htb_leaf,
        .qlen_notify = htb_qlen_notify,
        .get = htb_get,
        .put = htb_put,
        .change = htb_change_class,
        .delete = htb_delete,
        .walk = htb_walk,
        .tcf_chain = htb_find_tcf,
        .bind_tcf = htb_bind_filter,
        .unbind_tcf = htb_unbind_filter,
        .dump = htb_dump_class,
        .dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
        .next = NULL,
        .cl_ops = &htb_class_ops,
        .id = "htb",
        .priv_size = sizeof(struct htb_sched),
        .enqueue = htb_enqueue,
        .dequeue = htb_dequeue,
        .requeue = htb_requeue,
        .drop = htb_drop,
        .init = htb_init,
        .reset = htb_reset,
        .destroy = htb_destroy,
        .change = NULL /* htb_change */,
        .dump = htb_dump,
        .owner = THIS_MODULE,
};
static int __init htb_module_init(void)
{
        return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
        unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");