/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
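/* Illustrative only (not part of the original source): a typical HTB
 * hierarchy as built with the iproute2 tc(8) tool. Device name, handles
 * and rates are made-up example values.
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 70mbit ceil 100mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0) that may borrow from their
 * parent 1:1 up to "ceil"; unclassified traffic falls into 1:20 via the
 * "default 20" qdisc option (see q->defcls below).
 */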
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
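/* Illustrative only: both knobs above are plain module parameters, so they
 * can be set at load time or (mode 0640, root-writable) through sysfs, e.g.
 *
 *   modprobe sch_htb htb_hysteresis=1 htb_rate_est=1
 *   echo 0 > /sys/module/sch_htb/parameters/htb_hysteresis
 */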
/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	int			filter_cnt;
	int			refcnt;		/* usage count of this class */

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct gnet_stats_rate_est64 rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			struct list_head drop_list;
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	} un;
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head	direct_queue;
	long			direct_pkts;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */
	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful then we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so that
				 * reset bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->un.inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->un.inner.clprio[prio].feed);

			if (!p->un.inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}
		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
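/* Worked example (illustrative values, not from the original source):
 * with hysteresis off, lowater == hiwater == 0. Tokens are kept in
 * nanosecond units and replenish one-per-nanosecond, so if
 * ctokens = -2000 and *diff = 500 then toks = -1500 < 0: the class is
 * HTB_CANT_SEND and *diff is rewritten to 1500, i.e. the nanoseconds
 * until the ceil bucket reaches zero again - exactly the delay that the
 * callers below hand to htb_add_to_wait_tree().
 */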
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
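/* Numeric sketch (illustrative values, not from the original source):
 * at rate = 125000 bytes/s (1 mbit/s), psched_l2t_ns() charges a
 * 1500-byte packet 1500/125000 s = 12 ms worth of tokens. With
 * buffer = 20 ms, tokens = 15 ms and diff = 10 ms of elapsed time, the
 * 25 ms sum is first clamped to the 20 ms buffer, then the 12 ms charge
 * leaves 8 ms of tokens; the final clamp to (1 - mbuffer) only matters
 * for classes that have been over quota for a very long time.
 */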
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events with cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where the class's id is >= id,
 * or NULL if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
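/* The function above is the classic "lower bound" search on a red-black
 * tree: walk down from the root, remembering the last node whose classid
 * was >= id. It lets the DRR scan resume at (or just after) a classid
 * whose rb_node pointer was invalidated by a class removal; see the
 * last_ptr_id recovery in htb_lookup_leaf() below, which calls it as
 * htb_id_find_next_upper(prio, sp->root, *sp->pid).
 */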
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->un.inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	qdisc_qstats_overlimit(sch);
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	__skb_queue_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->un.leaf.q) {
		qlen = cl->un.leaf.q->q.qlen;
		qs.backlog = cl->un.leaf.q->qstats.backlog;
	}
	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without Oops).
	 */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? References? Does the
	 * tc subsys guarantee us that in htb_destroy it holds no class
	 * refs, so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		unsigned int qlen = cl->un.leaf.q->q.qlen;
		unsigned int backlog = cl->un.leaf.q->qstats.backlog;

		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				kfree(cl);
				goto failure;
			}
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		 * so that it can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;
			unsigned int backlog = parent->un.leaf.q->qstats.backlog;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* there used to be a nasty bug here: we have to check that the node
	 * is really a leaf before changing cl->un.leaf !
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}
static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least tc_index filter uses this just to get class
	 * for other reasons so that we have to allow for it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");