/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	Algorithm skeleton was taken from NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	he should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete; delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1,

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.
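
	(Reading [.] above as the floor: floor(MTU/(W*r_i))*W*r <= MTU*r/r_i,
	so the bound unfolds to delay_i <= MTU*r/(r_i*B) + (W*r + k*MTU)/B,
	which is exactly the IntServ C/R + D form with C = MTU*r,
	D = W*r + k*MTU and reserved rate R = r_i*B.)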

	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me, why I am wrong 8).

	--- Linux has no EOI event, so that we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator, this
	scheme looks very close to an ideal solution.  */
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class *next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char priority;		/* class priority */
	unsigned char priority2;	/* priority to be used after overlimit */
	unsigned char ewma_log;		/* time constant for idle time calculation */
	unsigned char ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char police;
#endif

	u32 defmap;

	/* Link-sharing scheduler parameters */
	long maxidle;			/* Class parameters: see below. */
	long offtime;
	long minidle;
	u32 avpkt;
	struct qdisc_rate_table *R_tab;

	/* Overlimit strategy parameters */
	void (*overlimit)(struct cbq_class *cl);
	psched_tdiff_t penalty;

	/* General scheduler (WRR) parameters */
	long allot;
	long quantum;			/* Allotment per WRR round */
	long weight;			/* Relative allotment: see below */

	struct Qdisc *qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class *split;	/* Ptr to split node */
	struct cbq_class *share;	/* Ptr to LS parent in the class tree */
	struct cbq_class *tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class *borrow;	/* NULL if class is bandwidth limited;
					   parent otherwise */
	struct cbq_class *sibling;	/* Sibling chain */
	struct cbq_class *children;	/* Pointer to children chain */

	struct Qdisc *q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char cpriority;	/* Effective priority */
	unsigned char delayed;
	unsigned char level;		/* level of the class in hierarchy:
					   0 for leaf classes, and maximal
					   level of children + 1 for nodes.
					 */

	psched_time_t last;		/* Last end of service */
	psched_time_t undertime;
	long avgidle;
	long deficit;			/* Saved deficit for WRR */
	psched_time_t penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats xstats;

	struct tcf_proto *filter_list;

	int refcnt;
	int filters;

	struct cbq_class *defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash clhash;		/* Hash table of all classes */
	int nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class link;

	unsigned int activemask;
	struct cbq_class *active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
							   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class *rx_class;
#endif
	struct cbq_class *tx_class;
	struct cbq_class *tx_borrowed;
	int tx_len;
	psched_time_t now;		/* Cached timestamp */
	psched_time_t now_rt;		/* Cached real time */
	unsigned int pmask;

	struct hrtimer delay_timer;
	struct qdisc_watchdog watchdog;	/* Watchdog timer,
					   started when CBQ has
					   backlog, but cannot
					   transmit just now */
	psched_tdiff_t wd_expires;
	int toplevel;
	u32 hgenerator;
};
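
/* L2T ("length to time"): the transmission time of a len-byte packet
 * at the class's configured rate, looked up in the precomputed rate
 * table.
 */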
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
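/* (Illustrative walk-through: an skb whose skb->priority names one of
 * our classes, say 1:10, is used directly; failing that, the current
 * head class's filters run, and if they return no class the head's
 * defaults[] map, indexed by the skb's TC_PRIO_* value, decides,
 * falling back to TC_PRIO_BESTEFFORT.)
 */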
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];
		}
		if (cl == NULL)
			goto fallback;

		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */
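/* Note: q->active[prio] always points at the *tail* of the circular
 * next_alive ring, so tail->next_alive is the head and appending at
 * the tail is O(1).
 */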
static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */
static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
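
/* q->toplevel caches the cutoff of Floyd's top-level link-sharing
 * guideline: a class may borrow only while the level of the ancestor
 * it borrows from does not exceed q->toplevel (cf. cbq_under_limit()).
 */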
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here; apparently they forgot
		 * to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
/* TC_CBQ_OVL_RCLASSIC: penalize classes in the hierarchy by offtime
 * when they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;

	cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}
/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif
/*
 * It is a mission-critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */
static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

			/* true_avgidle := (1-W)*true_avgidle + W*idle,
			 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
			 * cl->avgidle == true_avgidle/W,
			 * hence:
			 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
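
			/* E.g. (illustrative numbers): with ewma_log = 5,
			 * W = 1/32, a scaled avgidle of -64 means
			 * true_avgidle = -2; the class must then stay idle
			 * for (1 - 1/32)*64 = 62 ticks before true_avgidle
			 * returns to zero.
			 */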

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if link is completely congested.
		 * Though it is in agreement with the ancestor-only
		 * paradigm, it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	now = psched_get_time();
	incr = now - q->now_rt;

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		 * by adding expected packet transmission time.
		 * If real time is greater, we warp artificial clock,
		 * so that:
		 *
		 * cbq_time = max(real_time, work);
		 */
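		/* (Illustrative numbers: if sending q->tx_len bytes "costs"
		 * 120us of work but 200us of real time have elapsed, q->now
		 * advances by the full 200us: 120us of work plus an 80us
		 * warp. If only 80us elapsed, q->now still advances by the
		 * 120us of work.)
		 */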
		incr2 = L2T(&q->link, q->tx_len);
		q->now += incr2;
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
		q->now += incr;
	} else {
		if (now > q->now)
			q->now = now;
	}
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset conditions 2 and 3 and retry.
		 *
		 * Note, that NS and cbq-2.0 are buggy, peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes, but it is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
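			/* (E.g., plausibly: with 32-bit longs, a weight of
			 * 1250000 (10 Mbit/s in bytes/s) times an allot of
			 * 1514 already exceeds 2^31 once two classes share
			 * the priority.)
			 */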
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
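
/* cl->defmap is a bitmask over the TC_PRIO_* values for which cl wants
 * to be the default of its split node. cbq_sync_defmap() rebuilds the
 * split node's defaults[] from these masks, preferring the matching
 * class with the lowest level.
 */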
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static unsigned int cbq_drop(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		cl_head = q->active[prio];
		if (!cl_head)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}
static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2 - 1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2 - 1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2 + 1;
	opt.pad = 0;
	opt.penalty = cl->penalty;
	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	} else {
#ifdef CONFIG_NET_CLS_ACT
		if (cl->police == TC_POLICE_RECLASSIFY)
			new->reshape_fail = cbq_reshape_fail;
#endif
	}
	sch_tree_lock(sch);
	*old = cl->q;
	cl->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}
static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);
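
		/* (Auto-allocated minors live in 0x8001..0xFFFF;
		 * q->hgenerator remembers the last minor handed out.)
		 */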
		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");