1 /*
2 * net/sched/sch_cbq.c Class-Based Queueing discipline.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <net/netlink.h>
21 #include <net/pkt_sched.h>
22 #include <net/pkt_cls.h>
25 /* Class-Based Queueing (CBQ) algorithm.
26 =======================================
28 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
29 Management Models for Packet Networks",
30 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
32 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
34 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
35 Parameters", 1996
37 [4] Sally Floyd and Michael Speer, "Experimental Results
38 for Class-Based Queueing", 1998, not published.
40 -----------------------------------------------------------------------
42 Algorithm skeleton was taken from NS simulator cbq.cc.
43 If someone wants to check this code against the LBL version,
44 he should take into account that ONLY the skeleton was borrowed,
45 the implementation is different. Particularly:
47 --- The WRR algorithm is different. Our version looks more
48 reasonable (I hope) and works when quanta are allowed to be
49 less than MTU, which is always the case when real time classes
50 have small rates. Note that the statement of [3] is
51 incomplete: delay may actually be estimated even if class
52 per-round allotment is less than MTU. Namely, if per-round
53 allotment is W*r_i, and r_1+...+r_k = r < 1
55 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
57 In the worst case we have IntServ estimate with D = W*r+k*MTU
58 and C = MTU*r. The proof (if correct at all) is trivial.
61 --- It seems that cbq-2.0 is not very accurate. At least, I cannot
62 interpret some places, which look like wrong translations
63 from NS. Anyone is advised to find these differences
64 and explain to me, why I am wrong 8).
66 --- Linux has no EOI event, so we cannot estimate true class
67 idle time. The workaround is to treat the next dequeue event
68 as a sign that the previous packet has finished. This is wrong because of
69 internal device queueing, but on a permanently loaded link it is true.
70 Moreover, combined with clock integrator, this scheme looks
71 very close to an ideal solution. */
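/* Illustrative sketch only (hypothetical helper, not part of this qdisc):
 * the worst-case delay bound quoted above,
 *	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B,
 * written out in plain integer arithmetic, taking [] as rounding up.
 */
#if 0
static u64 cbq_example_delay_bound(u64 mtu,	/* link MTU			*/
				   u64 w_ri,	/* per-round allotment W*r_i	*/
				   u64 w_r,	/* W*r, r = r_1+...+r_k < 1	*/
				   u64 k,	/* number of classes		*/
				   u64 bps)	/* link bandwidth B		*/
{
	return (DIV_ROUND_UP(mtu, w_ri) * w_r + w_r + k * mtu) / bps;
}
#endif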
73 struct cbq_sched_data;
76 struct cbq_class {
77 struct Qdisc_class_common common;
78 struct cbq_class *next_alive; /* next class with backlog in this priority band */
80 /* Parameters */
81 unsigned char priority; /* class priority */
82 unsigned char priority2; /* priority to be used after overlimit */
83 unsigned char ewma_log; /* time constant for idle time calculation */
85 u32 defmap;
87 /* Link-sharing scheduler parameters */
88 long maxidle; /* Class parameters: see below. */
89 long offtime;
90 long minidle;
91 u32 avpkt;
92 struct qdisc_rate_table *R_tab;
94 /* General scheduler (WRR) parameters */
95 long allot;
96 long quantum; /* Allotment per WRR round */
97 long weight; /* Relative allotment: see below */
99 struct Qdisc *qdisc; /* Ptr to CBQ discipline */
100 struct cbq_class *split; /* Ptr to split node */
101 struct cbq_class *share; /* Ptr to LS parent in the class tree */
102 struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
103 struct cbq_class *borrow; /* NULL if class is bandwidth limited;
104 parent otherwise */
105 struct cbq_class *sibling; /* Sibling chain */
106 struct cbq_class *children; /* Pointer to children chain */
108 struct Qdisc *q; /* Elementary queueing discipline */
111 /* Variables */
112 unsigned char cpriority; /* Effective priority */
113 unsigned char delayed;
114 unsigned char level; /* level of the class in hierarchy:
115 0 for leaf classes, and maximal
116 level of children + 1 for nodes.
119 psched_time_t last; /* Last end of service */
120 psched_time_t undertime;
121 long avgidle;
122 long deficit; /* Saved deficit for WRR */
123 psched_time_t penalized;
124 struct gnet_stats_basic_packed bstats;
125 struct gnet_stats_queue qstats;
126 struct net_rate_estimator __rcu *rate_est;
127 struct tc_cbq_xstats xstats;
129 struct tcf_proto __rcu *filter_list;
131 int refcnt;
132 int filters;
134 struct cbq_class *defaults[TC_PRIO_MAX + 1];
137 struct cbq_sched_data {
138 struct Qdisc_class_hash clhash; /* Hash table of all classes */
139 int nclasses[TC_CBQ_MAXPRIO + 1];
140 unsigned int quanta[TC_CBQ_MAXPRIO + 1];
142 struct cbq_class link;
144 unsigned int activemask;
145 struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
146 with backlog */
148 #ifdef CONFIG_NET_CLS_ACT
149 struct cbq_class *rx_class;
150 #endif
151 struct cbq_class *tx_class;
152 struct cbq_class *tx_borrowed;
153 int tx_len;
154 psched_time_t now; /* Cached timestamp */
155 unsigned int pmask;
157 struct hrtimer delay_timer;
158 struct qdisc_watchdog watchdog; /* Watchdog timer,
159 started when CBQ has
160 backlog, but cannot
161 transmit just now */
162 psched_tdiff_t wd_expires;
163 int toplevel;
164 u32 hgenerator;
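/* L2T ("length to time"): transmission time, in psched ticks, of a len-byte
 * packet at the class's configured rate, looked up in its rate table.
 */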
168 #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
170 static inline struct cbq_class *
171 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
173 struct Qdisc_class_common *clc;
175 clc = qdisc_class_find(&q->clhash, classid);
176 if (clc == NULL)
177 return NULL;
178 return container_of(clc, struct cbq_class, common);
181 #ifdef CONFIG_NET_CLS_ACT
183 static struct cbq_class *
184 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
186 struct cbq_class *cl;
188 for (cl = this->tparent; cl; cl = cl->tparent) {
189 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
191 if (new != NULL && new != this)
192 return new;
194 return NULL;
197 #endif
199 /* Classify packet. The procedure is pretty complicated, but
200 * it allows us to combine link sharing and priority scheduling
201 * transparently.
203 * Namely, you can put link sharing rules (e.g. route based) at the root of CBQ,
204 * so that it resolves to split nodes. Then packets are classified
205 * by logical priority, or a more specific classifier may be attached
206 * to the split node.
209 static struct cbq_class *
210 cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
212 struct cbq_sched_data *q = qdisc_priv(sch);
213 struct cbq_class *head = &q->link;
214 struct cbq_class **defmap;
215 struct cbq_class *cl = NULL;
216 u32 prio = skb->priority;
217 struct tcf_proto *fl;
218 struct tcf_result res;
221 * Step 1. If skb->priority points to one of our classes, use it.
223 if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
224 (cl = cbq_class_lookup(q, prio)) != NULL)
225 return cl;
227 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
228 for (;;) {
229 int result = 0;
230 defmap = head->defaults;
232 fl = rcu_dereference_bh(head->filter_list);
234 * Step 2+n. Apply classifier.
236 result = tc_classify(skb, fl, &res, true);
237 if (!fl || result < 0)
238 goto fallback;
240 cl = (void *)res.class;
241 if (!cl) {
242 if (TC_H_MAJ(res.classid))
243 cl = cbq_class_lookup(q, res.classid);
244 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
245 cl = defmap[TC_PRIO_BESTEFFORT];
247 if (cl == NULL)
248 goto fallback;
250 if (cl->level >= head->level)
251 goto fallback;
252 #ifdef CONFIG_NET_CLS_ACT
253 switch (result) {
254 case TC_ACT_QUEUED:
255 case TC_ACT_STOLEN:
256 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
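		/* fall through */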
257 case TC_ACT_SHOT:
258 return NULL;
259 case TC_ACT_RECLASSIFY:
260 return cbq_reclassify(skb, cl);
262 #endif
263 if (cl->level == 0)
264 return cl;
267 * Step 3+n. If classifier selected a link sharing class,
268 * apply its agency-specific classifier.
269 * Repeat this procedure until we hit a leaf node.
271 head = cl;
274 fallback:
275 cl = head;
278 * Step 4. No success...
280 if (TC_H_MAJ(prio) == 0 &&
281 !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
282 !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
283 return head;
285 return cl;
289 * A packet has just been enqueued on an empty class.
290 * cbq_activate_class adds the class to the tail of the active class list
291 * of its priority band.
294 static inline void cbq_activate_class(struct cbq_class *cl)
296 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
297 int prio = cl->cpriority;
298 struct cbq_class *cl_tail;
300 cl_tail = q->active[prio];
301 q->active[prio] = cl;
303 if (cl_tail != NULL) {
304 cl->next_alive = cl_tail->next_alive;
305 cl_tail->next_alive = cl;
306 } else {
307 cl->next_alive = cl;
308 q->activemask |= (1<<prio);
313 * Unlink class from active chain.
314 * Note that this same unlinking is also done directly in cbq_dequeue*
315 * during the round-robin procedure.
318 static void cbq_deactivate_class(struct cbq_class *this)
320 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
321 int prio = this->cpriority;
322 struct cbq_class *cl;
323 struct cbq_class *cl_prev = q->active[prio];
325 do {
326 cl = cl_prev->next_alive;
327 if (cl == this) {
328 cl_prev->next_alive = cl->next_alive;
329 cl->next_alive = NULL;
331 if (cl == q->active[prio]) {
332 q->active[prio] = cl_prev;
333 if (cl == q->active[prio]) {
334 q->active[prio] = NULL;
335 q->activemask &= ~(1<<prio);
336 return;
339 return;
341 } while ((cl_prev = cl) != q->active[prio]);
344 static void
345 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
347 int toplevel = q->toplevel;
349 if (toplevel > cl->level) {
350 psched_time_t now = psched_get_time();
352 do {
353 if (cl->undertime < now) {
354 q->toplevel = cl->level;
355 return;
357 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
361 static int
362 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
363 struct sk_buff **to_free)
365 struct cbq_sched_data *q = qdisc_priv(sch);
366 int uninitialized_var(ret);
367 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
369 #ifdef CONFIG_NET_CLS_ACT
370 q->rx_class = cl;
371 #endif
372 if (cl == NULL) {
373 if (ret & __NET_XMIT_BYPASS)
374 qdisc_qstats_drop(sch);
375 __qdisc_drop(skb, to_free);
376 return ret;
379 ret = qdisc_enqueue(skb, cl->q, to_free);
380 if (ret == NET_XMIT_SUCCESS) {
381 sch->q.qlen++;
382 cbq_mark_toplevel(q, cl);
383 if (!cl->next_alive)
384 cbq_activate_class(cl);
385 return ret;
388 if (net_xmit_drop_count(ret)) {
389 qdisc_qstats_drop(sch);
390 cbq_mark_toplevel(q, cl);
391 cl->qstats.drops++;
393 return ret;
396 /* Overlimit action: penalize leaf class by adding offtime */
397 static void cbq_overlimit(struct cbq_class *cl)
399 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
400 psched_tdiff_t delay = cl->undertime - q->now;
402 if (!cl->delayed) {
403 delay += cl->offtime;
406 * The class goes to sleep, so that it will have no
407 * chance to work on avgidle. Let's forgive it 8)
409 * BTW cbq-2.0 has a bug in this
410 * place; apparently they forgot to shift it by cl->ewma_log.
412 if (cl->avgidle < 0)
413 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
414 if (cl->avgidle < cl->minidle)
415 cl->avgidle = cl->minidle;
416 if (delay <= 0)
417 delay = 1;
418 cl->undertime = q->now + delay;
420 cl->xstats.overactions++;
421 cl->delayed = 1;
423 if (q->wd_expires == 0 || q->wd_expires > delay)
424 q->wd_expires = delay;
426 /* Dirty work! We must schedule wakeups based on
427 * real available rate, rather than leaf rate,
428 * which may be tiny (even zero).
430 if (q->toplevel == TC_CBQ_MAXLEVEL) {
431 struct cbq_class *b;
432 psched_tdiff_t base_delay = q->wd_expires;
434 for (b = cl->borrow; b; b = b->borrow) {
435 delay = b->undertime - q->now;
436 if (delay < base_delay) {
437 if (delay <= 0)
438 delay = 1;
439 base_delay = delay;
443 q->wd_expires = base_delay;
447 static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
448 psched_time_t now)
450 struct cbq_class *cl;
451 struct cbq_class *cl_prev = q->active[prio];
452 psched_time_t sched = now;
454 if (cl_prev == NULL)
455 return 0;
457 do {
458 cl = cl_prev->next_alive;
459 if (now - cl->penalized > 0) {
460 cl_prev->next_alive = cl->next_alive;
461 cl->next_alive = NULL;
462 cl->cpriority = cl->priority;
463 cl->delayed = 0;
464 cbq_activate_class(cl);
466 if (cl == q->active[prio]) {
467 q->active[prio] = cl_prev;
468 if (cl == q->active[prio]) {
469 q->active[prio] = NULL;
470 return 0;
474 cl = cl_prev->next_alive;
475 } else if (sched - cl->penalized > 0)
476 sched = cl->penalized;
477 } while ((cl_prev = cl) != q->active[prio]);
479 return sched - now;
482 static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
484 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
485 delay_timer);
486 struct Qdisc *sch = q->watchdog.qdisc;
487 psched_time_t now;
488 psched_tdiff_t delay = 0;
489 unsigned int pmask;
491 now = psched_get_time();
493 pmask = q->pmask;
494 q->pmask = 0;
496 while (pmask) {
497 int prio = ffz(~pmask);
498 psched_tdiff_t tmp;
500 pmask &= ~(1<<prio);
502 tmp = cbq_undelay_prio(q, prio, now);
503 if (tmp > 0) {
504 q->pmask |= 1<<prio;
505 if (tmp < delay || delay == 0)
506 delay = tmp;
510 if (delay) {
511 ktime_t time;
513 time = 0;
514 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
515 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
518 __netif_schedule(qdisc_root(sch));
519 return HRTIMER_NORESTART;
523 * This is a mission-critical procedure.
525 * We "regenerate" the toplevel cutoff if the transmitting class
526 * has backlog and is not regulated. This is not part of the
527 * original CBQ description, but it looks more reasonable.
528 * It is probably wrong; this question needs further investigation.
531 static inline void
532 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
533 struct cbq_class *borrowed)
535 if (cl && q->toplevel >= borrowed->level) {
536 if (cl->q->q.qlen > 1) {
537 do {
538 if (borrowed->undertime == PSCHED_PASTPERFECT) {
539 q->toplevel = borrowed->level;
540 return;
542 } while ((borrowed = borrowed->borrow) != NULL);
544 #if 0
545 /* It is not necessary now. Uncommenting it
546 will save CPU cycles, but decrease fairness.
548 q->toplevel = TC_CBQ_MAXLEVEL;
549 #endif
553 static void
554 cbq_update(struct cbq_sched_data *q)
556 struct cbq_class *this = q->tx_class;
557 struct cbq_class *cl = this;
558 int len = q->tx_len;
559 psched_time_t now;
561 q->tx_class = NULL;
562 /* Time integrator. We calculate EOS time
563 * by adding expected packet transmission time.
565 now = q->now + L2T(&q->link, len);
567 for ( ; cl; cl = cl->share) {
568 long avgidle = cl->avgidle;
569 long idle;
571 cl->bstats.packets++;
572 cl->bstats.bytes += len;
575 * (now - last) is total time between packet right edges.
576 * (last_pktlen/rate) is "virtual" busy time, so that
578 * idle = (now - last) - last_pktlen/rate
581 idle = now - cl->last;
582 if ((unsigned long)idle > 128*1024*1024) {
583 avgidle = cl->maxidle;
584 } else {
585 idle -= L2T(cl, len);
587 /* true_avgidle := (1-W)*true_avgidle + W*idle,
588 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
589 * cl->avgidle == true_avgidle/W,
590 * hence:
592 avgidle += idle - (avgidle>>cl->ewma_log);
595 if (avgidle <= 0) {
596 /* Overlimit or at-limit */
598 if (avgidle < cl->minidle)
599 avgidle = cl->minidle;
601 cl->avgidle = avgidle;
603 /* Calculate expected time, when this class
604 * will be allowed to send.
605 * It will occur, when:
606 * (1-W)*true_avgidle + W*delay = 0, i.e.
607 * idle = (1/W - 1)*(-true_avgidle)
608 * or
609 * idle = (1 - W)*(-cl->avgidle);
611 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
614 * That is not all.
615 * To maintain the rate allocated to the class,
616 * we add to undertime the virtual clock time
617 * needed to complete the transmitted packet.
618 * (len/phys_bandwidth has already passed
619 * by the moment of cbq_update.)
622 idle -= L2T(&q->link, len);
623 idle += L2T(cl, len);
625 cl->undertime = now + idle;
626 } else {
627 /* Underlimit */
629 cl->undertime = PSCHED_PASTPERFECT;
630 if (avgidle > cl->maxidle)
631 cl->avgidle = cl->maxidle;
632 else
633 cl->avgidle = avgidle;
635 if ((s64)(now - cl->last) > 0)
636 cl->last = now;
639 cbq_update_toplevel(q, this, q->tx_borrowed);
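/* Illustrative sketch only (hypothetical helper, not used by the qdisc):
 * the scaled EWMA step from cbq_update() in isolation.  With W = 2^-ewma_log
 * and cl->avgidle == true_avgidle/W, the update
 *	true_avgidle := (1-W)*true_avgidle + W*idle
 * reduces to the shift-only form below.
 */
#if 0
static long cbq_example_ewma_step(long avgidle, long idle,
				  unsigned char ewma_log)
{
	return avgidle + idle - (avgidle >> ewma_log);
}
#endif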
642 static inline struct cbq_class *
643 cbq_under_limit(struct cbq_class *cl)
645 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
646 struct cbq_class *this_cl = cl;
648 if (cl->tparent == NULL)
649 return cl;
651 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
652 cl->delayed = 0;
653 return cl;
656 do {
657 /* This is a very suspicious place. Currently the overlimit
658 * action is generated for non-bounded classes
659 * only if the link is completely congested.
660 * Though this agrees with the ancestor-only paradigm,
661 * it looks very stupid. In particular,
662 * it means that this chunk of code will either
663 * never be called or result in strong amplification
664 * of burstiness. Dangerous, silly, and yet
665 * no other solution exists.
667 cl = cl->borrow;
668 if (!cl) {
669 this_cl->qstats.overlimits++;
670 cbq_overlimit(this_cl);
671 return NULL;
673 if (cl->level > q->toplevel)
674 return NULL;
675 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
677 cl->delayed = 0;
678 return cl;
681 static inline struct sk_buff *
682 cbq_dequeue_prio(struct Qdisc *sch, int prio)
684 struct cbq_sched_data *q = qdisc_priv(sch);
685 struct cbq_class *cl_tail, *cl_prev, *cl;
686 struct sk_buff *skb;
687 int deficit;
689 cl_tail = cl_prev = q->active[prio];
690 cl = cl_prev->next_alive;
692 do {
693 deficit = 0;
695 /* Start round */
696 do {
697 struct cbq_class *borrow = cl;
699 if (cl->q->q.qlen &&
700 (borrow = cbq_under_limit(cl)) == NULL)
701 goto skip_class;
703 if (cl->deficit <= 0) {
704 /* Class exhausted its allotment per
705 * this round. Switch to the next one.
707 deficit = 1;
708 cl->deficit += cl->quantum;
709 goto next_class;
712 skb = cl->q->dequeue(cl->q);
714 /* The class did not give us any skb :-(
715 * This can occur even if cl->q->q.qlen != 0,
716 * e.g. if cl->q == "tbf"
718 if (skb == NULL)
719 goto skip_class;
721 cl->deficit -= qdisc_pkt_len(skb);
722 q->tx_class = cl;
723 q->tx_borrowed = borrow;
724 if (borrow != cl) {
725 #ifndef CBQ_XSTATS_BORROWS_BYTES
726 borrow->xstats.borrows++;
727 cl->xstats.borrows++;
728 #else
729 borrow->xstats.borrows += qdisc_pkt_len(skb);
730 cl->xstats.borrows += qdisc_pkt_len(skb);
731 #endif
733 q->tx_len = qdisc_pkt_len(skb);
735 if (cl->deficit <= 0) {
736 q->active[prio] = cl;
737 cl = cl->next_alive;
738 cl->deficit += cl->quantum;
740 return skb;
742 skip_class:
743 if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
744 /* Class is empty or penalized.
745 * Unlink it from active chain.
747 cl_prev->next_alive = cl->next_alive;
748 cl->next_alive = NULL;
750 /* Did cl_tail point to it? */
751 if (cl == cl_tail) {
752 /* Repair it! */
753 cl_tail = cl_prev;
755 /* Was it the last class in this band? */
756 if (cl == cl_tail) {
757 /* Kill the band! */
758 q->active[prio] = NULL;
759 q->activemask &= ~(1<<prio);
760 if (cl->q->q.qlen)
761 cbq_activate_class(cl);
762 return NULL;
765 q->active[prio] = cl_tail;
767 if (cl->q->q.qlen)
768 cbq_activate_class(cl);
770 cl = cl_prev;
773 next_class:
774 cl_prev = cl;
775 cl = cl->next_alive;
776 } while (cl_prev != cl_tail);
777 } while (deficit);
779 q->active[prio] = cl_prev;
781 return NULL;
784 static inline struct sk_buff *
785 cbq_dequeue_1(struct Qdisc *sch)
787 struct cbq_sched_data *q = qdisc_priv(sch);
788 struct sk_buff *skb;
789 unsigned int activemask;
791 activemask = q->activemask & 0xFF;
792 while (activemask) {
793 int prio = ffz(~activemask);
794 activemask &= ~(1<<prio);
795 skb = cbq_dequeue_prio(sch, prio);
796 if (skb)
797 return skb;
799 return NULL;
802 static struct sk_buff *
803 cbq_dequeue(struct Qdisc *sch)
805 struct sk_buff *skb;
806 struct cbq_sched_data *q = qdisc_priv(sch);
807 psched_time_t now;
809 now = psched_get_time();
811 if (q->tx_class)
812 cbq_update(q);
814 q->now = now;
816 for (;;) {
817 q->wd_expires = 0;
819 skb = cbq_dequeue_1(sch);
820 if (skb) {
821 qdisc_bstats_update(sch, skb);
822 sch->q.qlen--;
823 return skb;
826 /* All the classes are overlimit.
828 * This is possible if:
830 * 1. Scheduler is empty.
831 * 2. Toplevel cutoff inhibited borrowing.
832 * 3. Root class is overlimit.
834 * Reset the 2nd and 3rd conditions and retry.
836 * Note that NS and cbq-2.0 are buggy here: peeking
837 * an arbitrary class is appropriate for ancestor-only
838 * sharing, but not for the toplevel algorithm.
840 * Our version is better but slower, because it requires
841 * two passes; this is unavoidable with top-level sharing.
844 if (q->toplevel == TC_CBQ_MAXLEVEL &&
845 q->link.undertime == PSCHED_PASTPERFECT)
846 break;
848 q->toplevel = TC_CBQ_MAXLEVEL;
849 q->link.undertime = PSCHED_PASTPERFECT;
852 /* No packets in the scheduler, or nobody wants to give them to us :-(
853 * Sigh... start the watchdog timer in the latter case.
856 if (sch->q.qlen) {
857 qdisc_qstats_overlimit(sch);
858 if (q->wd_expires)
859 qdisc_watchdog_schedule(&q->watchdog,
860 now + q->wd_expires);
862 return NULL;
865 /* CBQ class maintenance routines */
867 static void cbq_adjust_levels(struct cbq_class *this)
869 if (this == NULL)
870 return;
872 do {
873 int level = 0;
874 struct cbq_class *cl;
876 cl = this->children;
877 if (cl) {
878 do {
879 if (cl->level > level)
880 level = cl->level;
881 } while ((cl = cl->sibling) != this->children);
883 this->level = level + 1;
884 } while ((this = this->tparent) != NULL);
887 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
889 struct cbq_class *cl;
890 unsigned int h;
892 if (q->quanta[prio] == 0)
893 return;
895 for (h = 0; h < q->clhash.hashsize; h++) {
896 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
897 /* BUGGGG... Beware! This expression suffers from
898 * arithmetic overflows!
900 if (cl->priority == prio) {
901 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
902 q->quanta[prio];
904 if (cl->quantum <= 0 ||
905 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
906 pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
907 cl->common.classid, cl->quantum);
908 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
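/* Hypothetical 64-bit variant of the quantum expression above (sketch only,
 * not wired in): doing the multiplication in u64 avoids the overflow the
 * comment warns about; div_u64() would need <linux/math64.h>.
 */
#if 0
static long cbq_example_quantum64(u32 weight, u32 allot, u32 nclasses,
				  u32 quanta)
{
	u64 tmp = (u64)weight * allot * nclasses;

	return (long)div_u64(tmp, quanta);
}
#endif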
914 static void cbq_sync_defmap(struct cbq_class *cl)
916 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
917 struct cbq_class *split = cl->split;
918 unsigned int h;
919 int i;
921 if (split == NULL)
922 return;
924 for (i = 0; i <= TC_PRIO_MAX; i++) {
925 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
926 split->defaults[i] = NULL;
929 for (i = 0; i <= TC_PRIO_MAX; i++) {
930 int level = split->level;
932 if (split->defaults[i])
933 continue;
935 for (h = 0; h < q->clhash.hashsize; h++) {
936 struct cbq_class *c;
938 hlist_for_each_entry(c, &q->clhash.hash[h],
939 common.hnode) {
940 if (c->split == split && c->level < level &&
941 c->defmap & (1<<i)) {
942 split->defaults[i] = c;
943 level = c->level;
950 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
952 struct cbq_class *split = NULL;
954 if (splitid == 0) {
955 split = cl->split;
956 if (!split)
957 return;
958 splitid = split->common.classid;
961 if (split == NULL || split->common.classid != splitid) {
962 for (split = cl->tparent; split; split = split->tparent)
963 if (split->common.classid == splitid)
964 break;
967 if (split == NULL)
968 return;
970 if (cl->split != split) {
971 cl->defmap = 0;
972 cbq_sync_defmap(cl);
973 cl->split = split;
974 cl->defmap = def & mask;
975 } else
976 cl->defmap = (cl->defmap & ~mask) | (def & mask);
978 cbq_sync_defmap(cl);
981 static void cbq_unlink_class(struct cbq_class *this)
983 struct cbq_class *cl, **clp;
984 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
986 qdisc_class_hash_remove(&q->clhash, &this->common);
988 if (this->tparent) {
989 clp = &this->sibling;
990 cl = *clp;
991 do {
992 if (cl == this) {
993 *clp = cl->sibling;
994 break;
996 clp = &cl->sibling;
997 } while ((cl = *clp) != this->sibling);
999 if (this->tparent->children == this) {
1000 this->tparent->children = this->sibling;
1001 if (this->sibling == this)
1002 this->tparent->children = NULL;
1004 } else {
1005 WARN_ON(this->sibling != this);
1009 static void cbq_link_class(struct cbq_class *this)
1011 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1012 struct cbq_class *parent = this->tparent;
1014 this->sibling = this;
1015 qdisc_class_hash_insert(&q->clhash, &this->common);
1017 if (parent == NULL)
1018 return;
1020 if (parent->children == NULL) {
1021 parent->children = this;
1022 } else {
1023 this->sibling = parent->children->sibling;
1024 parent->children->sibling = this;
1028 static void
1029 cbq_reset(struct Qdisc *sch)
1031 struct cbq_sched_data *q = qdisc_priv(sch);
1032 struct cbq_class *cl;
1033 int prio;
1034 unsigned int h;
1036 q->activemask = 0;
1037 q->pmask = 0;
1038 q->tx_class = NULL;
1039 q->tx_borrowed = NULL;
1040 qdisc_watchdog_cancel(&q->watchdog);
1041 hrtimer_cancel(&q->delay_timer);
1042 q->toplevel = TC_CBQ_MAXLEVEL;
1043 q->now = psched_get_time();
1045 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1046 q->active[prio] = NULL;
1048 for (h = 0; h < q->clhash.hashsize; h++) {
1049 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1050 qdisc_reset(cl->q);
1052 cl->next_alive = NULL;
1053 cl->undertime = PSCHED_PASTPERFECT;
1054 cl->avgidle = cl->maxidle;
1055 cl->deficit = cl->quantum;
1056 cl->cpriority = cl->priority;
1059 sch->q.qlen = 0;
1063 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1065 if (lss->change & TCF_CBQ_LSS_FLAGS) {
1066 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1067 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1069 if (lss->change & TCF_CBQ_LSS_EWMA)
1070 cl->ewma_log = lss->ewma_log;
1071 if (lss->change & TCF_CBQ_LSS_AVPKT)
1072 cl->avpkt = lss->avpkt;
1073 if (lss->change & TCF_CBQ_LSS_MINIDLE)
1074 cl->minidle = -(long)lss->minidle;
1075 if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
1076 cl->maxidle = lss->maxidle;
1077 cl->avgidle = lss->maxidle;
1079 if (lss->change & TCF_CBQ_LSS_OFFTIME)
1080 cl->offtime = lss->offtime;
1081 return 0;
1084 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1086 q->nclasses[cl->priority]--;
1087 q->quanta[cl->priority] -= cl->weight;
1088 cbq_normalize_quanta(q, cl->priority);
1091 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1093 q->nclasses[cl->priority]++;
1094 q->quanta[cl->priority] += cl->weight;
1095 cbq_normalize_quanta(q, cl->priority);
1098 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1100 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1102 if (wrr->allot)
1103 cl->allot = wrr->allot;
1104 if (wrr->weight)
1105 cl->weight = wrr->weight;
1106 if (wrr->priority) {
1107 cl->priority = wrr->priority - 1;
1108 cl->cpriority = cl->priority;
1109 if (cl->priority >= cl->priority2)
1110 cl->priority2 = TC_CBQ_MAXPRIO - 1;
1113 cbq_addprio(q, cl);
1114 return 0;
1117 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1119 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1120 return 0;
1123 static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1124 [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
1125 [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
1126 [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
1127 [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
1128 [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
1129 [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1130 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1133 static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1135 struct cbq_sched_data *q = qdisc_priv(sch);
1136 struct nlattr *tb[TCA_CBQ_MAX + 1];
1137 struct tc_ratespec *r;
1138 int err;
1140 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
1141 if (err < 0)
1142 return err;
1144 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
1145 return -EINVAL;
1147 r = nla_data(tb[TCA_CBQ_RATE]);
1149 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
1150 return -EINVAL;
1152 err = qdisc_class_hash_init(&q->clhash);
1153 if (err < 0)
1154 goto put_rtab;
1156 q->link.refcnt = 1;
1157 q->link.sibling = &q->link;
1158 q->link.common.classid = sch->handle;
1159 q->link.qdisc = sch;
1160 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1161 sch->handle);
1162 if (!q->link.q)
1163 q->link.q = &noop_qdisc;
1164 else
1165 qdisc_hash_add(q->link.q, true);
1167 q->link.priority = TC_CBQ_MAXPRIO - 1;
1168 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1169 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
1170 q->link.allot = psched_mtu(qdisc_dev(sch));
1171 q->link.quantum = q->link.allot;
1172 q->link.weight = q->link.R_tab->rate.rate;
1174 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1175 q->link.avpkt = q->link.allot/2;
1176 q->link.minidle = -0x7FFFFFFF;
1178 qdisc_watchdog_init(&q->watchdog, sch);
1179 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1180 q->delay_timer.function = cbq_undelay;
1181 q->toplevel = TC_CBQ_MAXLEVEL;
1182 q->now = psched_get_time();
1184 cbq_link_class(&q->link);
1186 if (tb[TCA_CBQ_LSSOPT])
1187 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
1189 cbq_addprio(q, &q->link);
1190 return 0;
1192 put_rtab:
1193 qdisc_put_rtab(q->link.R_tab);
1194 return err;
1197 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1199 unsigned char *b = skb_tail_pointer(skb);
1201 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1202 goto nla_put_failure;
1203 return skb->len;
1205 nla_put_failure:
1206 nlmsg_trim(skb, b);
1207 return -1;
1210 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1212 unsigned char *b = skb_tail_pointer(skb);
1213 struct tc_cbq_lssopt opt;
1215 opt.flags = 0;
1216 if (cl->borrow == NULL)
1217 opt.flags |= TCF_CBQ_LSS_BOUNDED;
1218 if (cl->share == NULL)
1219 opt.flags |= TCF_CBQ_LSS_ISOLATED;
1220 opt.ewma_log = cl->ewma_log;
1221 opt.level = cl->level;
1222 opt.avpkt = cl->avpkt;
1223 opt.maxidle = cl->maxidle;
1224 opt.minidle = (u32)(-cl->minidle);
1225 opt.offtime = cl->offtime;
1226 opt.change = ~0;
1227 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1228 goto nla_put_failure;
1229 return skb->len;
1231 nla_put_failure:
1232 nlmsg_trim(skb, b);
1233 return -1;
1236 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1238 unsigned char *b = skb_tail_pointer(skb);
1239 struct tc_cbq_wrropt opt;
1241 memset(&opt, 0, sizeof(opt));
1242 opt.flags = 0;
1243 opt.allot = cl->allot;
1244 opt.priority = cl->priority + 1;
1245 opt.cpriority = cl->cpriority + 1;
1246 opt.weight = cl->weight;
1247 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1248 goto nla_put_failure;
1249 return skb->len;
1251 nla_put_failure:
1252 nlmsg_trim(skb, b);
1253 return -1;
1256 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1258 unsigned char *b = skb_tail_pointer(skb);
1259 struct tc_cbq_fopt opt;
1261 if (cl->split || cl->defmap) {
1262 opt.split = cl->split ? cl->split->common.classid : 0;
1263 opt.defmap = cl->defmap;
1264 opt.defchange = ~0;
1265 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1266 goto nla_put_failure;
1268 return skb->len;
1270 nla_put_failure:
1271 nlmsg_trim(skb, b);
1272 return -1;
1275 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1277 if (cbq_dump_lss(skb, cl) < 0 ||
1278 cbq_dump_rate(skb, cl) < 0 ||
1279 cbq_dump_wrr(skb, cl) < 0 ||
1280 cbq_dump_fopt(skb, cl) < 0)
1281 return -1;
1282 return 0;
1285 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1287 struct cbq_sched_data *q = qdisc_priv(sch);
1288 struct nlattr *nest;
1290 nest = nla_nest_start(skb, TCA_OPTIONS);
1291 if (nest == NULL)
1292 goto nla_put_failure;
1293 if (cbq_dump_attr(skb, &q->link) < 0)
1294 goto nla_put_failure;
1295 return nla_nest_end(skb, nest);
1297 nla_put_failure:
1298 nla_nest_cancel(skb, nest);
1299 return -1;
1302 static int
1303 cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1305 struct cbq_sched_data *q = qdisc_priv(sch);
1307 q->link.xstats.avgidle = q->link.avgidle;
1308 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1311 static int
1312 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1313 struct sk_buff *skb, struct tcmsg *tcm)
1315 struct cbq_class *cl = (struct cbq_class *)arg;
1316 struct nlattr *nest;
1318 if (cl->tparent)
1319 tcm->tcm_parent = cl->tparent->common.classid;
1320 else
1321 tcm->tcm_parent = TC_H_ROOT;
1322 tcm->tcm_handle = cl->common.classid;
1323 tcm->tcm_info = cl->q->handle;
1325 nest = nla_nest_start(skb, TCA_OPTIONS);
1326 if (nest == NULL)
1327 goto nla_put_failure;
1328 if (cbq_dump_attr(skb, cl) < 0)
1329 goto nla_put_failure;
1330 return nla_nest_end(skb, nest);
1332 nla_put_failure:
1333 nla_nest_cancel(skb, nest);
1334 return -1;
1337 static int
1338 cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1339 struct gnet_dump *d)
1341 struct cbq_sched_data *q = qdisc_priv(sch);
1342 struct cbq_class *cl = (struct cbq_class *)arg;
1344 cl->xstats.avgidle = cl->avgidle;
1345 cl->xstats.undertime = 0;
1347 if (cl->undertime != PSCHED_PASTPERFECT)
1348 cl->xstats.undertime = cl->undertime - q->now;
1350 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1351 d, NULL, &cl->bstats) < 0 ||
1352 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1353 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
1354 return -1;
1356 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1359 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1360 struct Qdisc **old)
1362 struct cbq_class *cl = (struct cbq_class *)arg;
1364 if (new == NULL) {
1365 new = qdisc_create_dflt(sch->dev_queue,
1366 &pfifo_qdisc_ops, cl->common.classid);
1367 if (new == NULL)
1368 return -ENOBUFS;
1371 *old = qdisc_replace(sch, new, &cl->q);
1372 return 0;
1375 static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
1377 struct cbq_class *cl = (struct cbq_class *)arg;
1379 return cl->q;
1382 static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1384 struct cbq_class *cl = (struct cbq_class *)arg;
1386 if (cl->q->q.qlen == 0)
1387 cbq_deactivate_class(cl);
1390 static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
1392 struct cbq_sched_data *q = qdisc_priv(sch);
1393 struct cbq_class *cl = cbq_class_lookup(q, classid);
1395 if (cl) {
1396 cl->refcnt++;
1397 return (unsigned long)cl;
1399 return 0;
1402 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1404 struct cbq_sched_data *q = qdisc_priv(sch);
1406 WARN_ON(cl->filters);
1408 tcf_destroy_chain(&cl->filter_list);
1409 qdisc_destroy(cl->q);
1410 qdisc_put_rtab(cl->R_tab);
1411 gen_kill_estimator(&cl->rate_est);
1412 if (cl != &q->link)
1413 kfree(cl);
1416 static void cbq_destroy(struct Qdisc *sch)
1418 struct cbq_sched_data *q = qdisc_priv(sch);
1419 struct hlist_node *next;
1420 struct cbq_class *cl;
1421 unsigned int h;
1423 #ifdef CONFIG_NET_CLS_ACT
1424 q->rx_class = NULL;
1425 #endif
1427 * Filters must be destroyed first because we don't destroy the
1428 * classes from root to leaves, which means that filters can still
1429 * be bound to classes which have been destroyed already. --TGR '04
1431 for (h = 0; h < q->clhash.hashsize; h++) {
1432 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
1433 tcf_destroy_chain(&cl->filter_list);
1435 for (h = 0; h < q->clhash.hashsize; h++) {
1436 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1437 common.hnode)
1438 cbq_destroy_class(sch, cl);
1440 qdisc_class_hash_destroy(&q->clhash);
1443 static void cbq_put(struct Qdisc *sch, unsigned long arg)
1445 struct cbq_class *cl = (struct cbq_class *)arg;
1447 if (--cl->refcnt == 0) {
1448 #ifdef CONFIG_NET_CLS_ACT
1449 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1450 struct cbq_sched_data *q = qdisc_priv(sch);
1452 spin_lock_bh(root_lock);
1453 if (q->rx_class == cl)
1454 q->rx_class = NULL;
1455 spin_unlock_bh(root_lock);
1456 #endif
1458 cbq_destroy_class(sch, cl);
1462 static int
1463 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
1464 unsigned long *arg)
1466 int err;
1467 struct cbq_sched_data *q = qdisc_priv(sch);
1468 struct cbq_class *cl = (struct cbq_class *)*arg;
1469 struct nlattr *opt = tca[TCA_OPTIONS];
1470 struct nlattr *tb[TCA_CBQ_MAX + 1];
1471 struct cbq_class *parent;
1472 struct qdisc_rate_table *rtab = NULL;
1474 if (opt == NULL)
1475 return -EINVAL;
1477 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
1478 if (err < 0)
1479 return err;
1481 if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
1482 return -EOPNOTSUPP;
1484 if (cl) {
1485 /* Check parent */
1486 if (parentid) {
1487 if (cl->tparent &&
1488 cl->tparent->common.classid != parentid)
1489 return -EINVAL;
1490 if (!cl->tparent && parentid != TC_H_ROOT)
1491 return -EINVAL;
1494 if (tb[TCA_CBQ_RATE]) {
1495 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
1496 tb[TCA_CBQ_RTAB]);
1497 if (rtab == NULL)
1498 return -EINVAL;
1501 if (tca[TCA_RATE]) {
1502 err = gen_replace_estimator(&cl->bstats, NULL,
1503 &cl->rate_est,
1504 NULL,
1505 qdisc_root_sleeping_running(sch),
1506 tca[TCA_RATE]);
1507 if (err) {
1508 qdisc_put_rtab(rtab);
1509 return err;
1513 /* Change class parameters */
1514 sch_tree_lock(sch);
1516 if (cl->next_alive != NULL)
1517 cbq_deactivate_class(cl);
1519 if (rtab) {
1520 qdisc_put_rtab(cl->R_tab);
1521 cl->R_tab = rtab;
1524 if (tb[TCA_CBQ_LSSOPT])
1525 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1527 if (tb[TCA_CBQ_WRROPT]) {
1528 cbq_rmprio(q, cl);
1529 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1532 if (tb[TCA_CBQ_FOPT])
1533 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1535 if (cl->q->q.qlen)
1536 cbq_activate_class(cl);
1538 sch_tree_unlock(sch);
1540 return 0;
1543 if (parentid == TC_H_ROOT)
1544 return -EINVAL;
1546 if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
1547 tb[TCA_CBQ_LSSOPT] == NULL)
1548 return -EINVAL;
1550 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
1551 if (rtab == NULL)
1552 return -EINVAL;
1554 if (classid) {
1555 err = -EINVAL;
1556 if (TC_H_MAJ(classid ^ sch->handle) ||
1557 cbq_class_lookup(q, classid))
1558 goto failure;
1559 } else {
1560 int i;
1561 classid = TC_H_MAKE(sch->handle, 0x8000);
1563 for (i = 0; i < 0x8000; i++) {
1564 if (++q->hgenerator >= 0x8000)
1565 q->hgenerator = 1;
1566 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
1567 break;
1569 err = -ENOSR;
1570 if (i >= 0x8000)
1571 goto failure;
1572 classid = classid|q->hgenerator;
1575 parent = &q->link;
1576 if (parentid) {
1577 parent = cbq_class_lookup(q, parentid);
1578 err = -EINVAL;
1579 if (parent == NULL)
1580 goto failure;
1583 err = -ENOBUFS;
1584 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1585 if (cl == NULL)
1586 goto failure;
1588 if (tca[TCA_RATE]) {
1589 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1590 NULL,
1591 qdisc_root_sleeping_running(sch),
1592 tca[TCA_RATE]);
1593 if (err) {
1594 kfree(cl);
1595 goto failure;
1599 cl->R_tab = rtab;
1600 rtab = NULL;
1601 cl->refcnt = 1;
1602 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
1603 if (!cl->q)
1604 cl->q = &noop_qdisc;
1605 else
1606 qdisc_hash_add(cl->q, true);
1608 cl->common.classid = classid;
1609 cl->tparent = parent;
1610 cl->qdisc = sch;
1611 cl->allot = parent->allot;
1612 cl->quantum = cl->allot;
1613 cl->weight = cl->R_tab->rate.rate;
1615 sch_tree_lock(sch);
1616 cbq_link_class(cl);
1617 cl->borrow = cl->tparent;
1618 if (cl->tparent != &q->link)
1619 cl->share = cl->tparent;
1620 cbq_adjust_levels(parent);
1621 cl->minidle = -0x7FFFFFFF;
1622 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1623 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1624 if (cl->ewma_log == 0)
1625 cl->ewma_log = q->link.ewma_log;
1626 if (cl->maxidle == 0)
1627 cl->maxidle = q->link.maxidle;
1628 if (cl->avpkt == 0)
1629 cl->avpkt = q->link.avpkt;
1630 if (tb[TCA_CBQ_FOPT])
1631 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1632 sch_tree_unlock(sch);
1634 qdisc_class_hash_grow(sch, &q->clhash);
1636 *arg = (unsigned long)cl;
1637 return 0;
1639 failure:
1640 qdisc_put_rtab(rtab);
1641 return err;
1644 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1646 struct cbq_sched_data *q = qdisc_priv(sch);
1647 struct cbq_class *cl = (struct cbq_class *)arg;
1648 unsigned int qlen, backlog;
1650 if (cl->filters || cl->children || cl == &q->link)
1651 return -EBUSY;
1653 sch_tree_lock(sch);
1655 qlen = cl->q->q.qlen;
1656 backlog = cl->q->qstats.backlog;
1657 qdisc_reset(cl->q);
1658 qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
1660 if (cl->next_alive)
1661 cbq_deactivate_class(cl);
1663 if (q->tx_borrowed == cl)
1664 q->tx_borrowed = q->tx_class;
1665 if (q->tx_class == cl) {
1666 q->tx_class = NULL;
1667 q->tx_borrowed = NULL;
1669 #ifdef CONFIG_NET_CLS_ACT
1670 if (q->rx_class == cl)
1671 q->rx_class = NULL;
1672 #endif
1674 cbq_unlink_class(cl);
1675 cbq_adjust_levels(cl->tparent);
1676 cl->defmap = 0;
1677 cbq_sync_defmap(cl);
1679 cbq_rmprio(q, cl);
1680 sch_tree_unlock(sch);
1682 BUG_ON(--cl->refcnt == 0);
1684 * This shouldn't happen: we "hold" one cops->get() when called
1685 * from tc_ctl_tclass; the destroy method is done from cops->put().
1688 return 0;
1691 static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
1692 unsigned long arg)
1694 struct cbq_sched_data *q = qdisc_priv(sch);
1695 struct cbq_class *cl = (struct cbq_class *)arg;
1697 if (cl == NULL)
1698 cl = &q->link;
1700 return &cl->filter_list;
1703 static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1704 u32 classid)
1706 struct cbq_sched_data *q = qdisc_priv(sch);
1707 struct cbq_class *p = (struct cbq_class *)parent;
1708 struct cbq_class *cl = cbq_class_lookup(q, classid);
1710 if (cl) {
1711 if (p && p->level <= cl->level)
1712 return 0;
1713 cl->filters++;
1714 return (unsigned long)cl;
1716 return 0;
1719 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
1721 struct cbq_class *cl = (struct cbq_class *)arg;
1723 cl->filters--;
1726 static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1728 struct cbq_sched_data *q = qdisc_priv(sch);
1729 struct cbq_class *cl;
1730 unsigned int h;
1732 if (arg->stop)
1733 return;
1735 for (h = 0; h < q->clhash.hashsize; h++) {
1736 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1737 if (arg->count < arg->skip) {
1738 arg->count++;
1739 continue;
1741 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1742 arg->stop = 1;
1743 return;
1745 arg->count++;
1750 static const struct Qdisc_class_ops cbq_class_ops = {
1751 .graft = cbq_graft,
1752 .leaf = cbq_leaf,
1753 .qlen_notify = cbq_qlen_notify,
1754 .get = cbq_get,
1755 .put = cbq_put,
1756 .change = cbq_change_class,
1757 .delete = cbq_delete,
1758 .walk = cbq_walk,
1759 .tcf_chain = cbq_find_tcf,
1760 .bind_tcf = cbq_bind_filter,
1761 .unbind_tcf = cbq_unbind_filter,
1762 .dump = cbq_dump_class,
1763 .dump_stats = cbq_dump_class_stats,
1766 static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
1767 .next = NULL,
1768 .cl_ops = &cbq_class_ops,
1769 .id = "cbq",
1770 .priv_size = sizeof(struct cbq_sched_data),
1771 .enqueue = cbq_enqueue,
1772 .dequeue = cbq_dequeue,
1773 .peek = qdisc_peek_dequeued,
1774 .init = cbq_init,
1775 .reset = cbq_reset,
1776 .destroy = cbq_destroy,
1777 .change = NULL,
1778 .dump = cbq_dump,
1779 .dump_stats = cbq_dump_stats,
1780 .owner = THIS_MODULE,
1783 static int __init cbq_module_init(void)
1785 return register_qdisc(&cbq_qdisc_ops);
1787 static void __exit cbq_module_exit(void)
1789 unregister_qdisc(&cbq_qdisc_ops);
1791 module_init(cbq_module_init)
1792 module_exit(cbq_module_exit)
1793 MODULE_LICENSE("GPL");