pkt_sched: sch_htb: Warn on too many events.
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
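
/* Illustrative sketch (not part of the original source): with
   TC_HTB_MAXDEPTH == 8, the level numbering for a small hierarchy
   built by htb_change_class() comes out as

	1:1	root class	level = TC_HTB_MAXDEPTH - 1 = 7
	1:10	interior class	level = 6 (one less than its parent)
	1:100	leaf class	level = 0 (leaves are always level 0)
*/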
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
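
/* Usage sketch (explanatory, not in the original source): module_param()
   with 0640 permissions exposes the parameter under sysfs, so it can be
   toggled at runtime by root:

	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis

   or set at load time via "modprobe sch_htb htb_hysteresis=1". */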
/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	struct Qdisc_class_common common;
	/* general class parameters */
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	int prio;		/* these two are used only by leaves... */
	int quantum;		/* but stored for parent-to-leaf return */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lose the ptr value and start from
			   the first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};
struct htb_sched {
	struct Qdisc_class_hash clhash;
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
};
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
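
/* Classification example (a sketch, not from the original source): with
   the qdisc set up as "tc qdisc add dev eth0 root handle 1: htb default 20",
   a filter such as

	tc filter add dev eth0 parent 1: protocol ip prio 1 \
		u32 match ip dport 80 0xffff flowid 1:10

   makes htb_classify() return leaf class 1:10 for matching packets;
   anything unmatched falls back to the default class 1:20, and if that
   does not exist as a leaf the packet goes to the direct queue
   (HTB_DIRECT). */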
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so
				   reset the bit in mask as parent is already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->common.classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
static inline long htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}

static inline long htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
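
/* Worked example (illustrative, assumes hysteresis off so both watermarks
   are 0): if cl->ctokens + *diff < 0 the class has exceeded its ceil and
   gets HTB_CANT_SEND; if ctokens stay non-negative but cl->tokens + *diff
   < 0 the class exceeded only its own rate and may borrow (HTB_MAY_BORROW);
   with both non-negative it is HTB_CAN_SEND. In the first two cases *diff
   is set to -toks, the token deficit that must be replenished before the
   next mode change, which htb_add_to_wait_tree() turns into pq_key. */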
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			cl->qstats.drops++;
		}
		return ret;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
		cl->bstats.bytes += qdisc_pkt_len(skb);
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	return NET_XMIT_SUCCESS;
}
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (long) qdisc_l2t(cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (long) qdisc_l2t(cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	long diff;

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb) ?
					skb_shinfo(skb)->gso_segs : 1;
		}
		cl = cl->parent;
	}
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level,
				   unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	   1 to simplify things when jiffy is going to be incremented
	   too soon */
	unsigned long stop_at = start + 2;
	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue on next jiffie (including above) */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		printk(KERN_WARNING "htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
}
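
/* Note on the return value above (explanatory, not in the original
   source): PSCHED_TICKS_PER_SEC / HZ is one jiffy expressed in psched
   ticks, so q->now + 2 * PSCHED_TICKS_PER_SEC / HZ asks the caller to
   come back roughly two jiffies from now, matching the 2-jiffy budget
   that was just exhausted. */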
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	WARN_ON(1);
	return NULL;
}
/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();
	start_at = jiffies;

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
	return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_INIT + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_INIT] == NULL) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
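
/* Setup sketch (assumption, not from the original source): the values
   parsed above correspond to a tc invocation such as

	tc qdisc add dev eth0 root handle 1: htb default 10 r2q 10

   where "default" becomes q->defcls and "r2q" becomes q->rate2quantum
   (clamped to at least 1 above). */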
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	spin_lock_bh(root_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	spin_lock_bh(root_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					     &pfifo_qdisc_ops,
					     cl->common.classid))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		*old = cl->un.leaf.q;
		cl->un.leaf.q = new;
		if (*old != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct htb_class *cl;
	unsigned int i;

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without an Oops). */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct nlattr *tb[TCA_HTB_RTAB + 1];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
	if (!rtab || !ctab)
		goto failure;
	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE] ? : &est.nla);
		if (err) {
			kfree(cl);
			goto failure;
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		   so it can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here, we have to check that node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
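
/* Class setup sketch (assumption, not from the original source): a call
   like

	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit \
		ceil 2mbit burst 15k prio 2

   arrives here with hopt->rate/ceil/buffer/prio filled in; quantum is
   derived as rate / r2q unless given explicitly, and the derived value
   is clamped to the [1000, 200000] range above. */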
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least tc_index filter uses this just to get class
	   for other reasons so that we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");