/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	Algorithm skeleton was taken from the NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	they should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete: delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

		delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.

	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with the clock integrator, this scheme looks
	very close to an ideal solution.  */
struct cbq_sched_data;


struct cbq_class
{
	struct cbq_class	*next;		/* hash table link */
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	u32			classid;
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;
#ifdef CONFIG_NET_CLS_POLICE
	unsigned char		police;
#endif

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	long			penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	unsigned long		penalized;
	struct gnet_stats_basic	bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est rate_est;
	spinlock_t		*stats_lock;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX+1];
};
struct cbq_sched_data
{
	struct cbq_class	*classes[16];		/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO+1];
	unsigned		quanta[TC_CBQ_MAXPRIO+1];

	struct cbq_class	link;

	unsigned		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_POLICE
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;			/* Cached timestamp */
	psched_time_t		now_rt;			/* Cached real time */
	unsigned		pmask;

	struct timer_list	delay_timer;
	struct timer_list	wd_timer;		/* Watchdog timer,
							   started when CBQ has
							   backlog, but cannot
							   transmit just now */
	long			wd_expires;
	int			toplevel;
	u32			hgenerator;
};
#define L2T(cl,len)	((cl)->R_tab->data[(len)>>(cl)->R_tab->rate.cell_log])
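/* For illustration (values are hypothetical, not from this file): with
   rate.cell_log == 3, a 998-byte packet indexes data[998>>3] == data[124],
   which holds the precomputed time, in scheduler ticks, to transmit any
   packet of 992..999 bytes at the class's configured rate. tc(8) builds
   this table in userspace and passes it down as TCA_CBQ_RTAB. */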

static __inline__ unsigned cbq_hash(u32 h)
{
	h ^= h>>8;
	h ^= h>>4;
	return h&0xF;
}
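/* Worked example of the fold above (illustrative classid): for
   h == 0x00010002, h ^= h>>8 gives 0x00010102, h ^= h>>4 gives
   0x00011112, and the low nibble selects bucket 2 of the 16-entry
   classes[] hash table. */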

static __inline__ struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct cbq_class *cl;

	for (cl = q->classes[cbq_hash(classid)]; cl; cl = cl->next)
		if (cl->classid == classid)
			return cl;
	return NULL;
}
#ifdef CONFIG_NET_CLS_POLICE

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl, *new;

	for (cl = this->tparent; cl; cl = cl->tparent)
		if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
			return new;

	return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
   it allows us to combine link sharing and priority scheduling
   transparently.

   Namely, you can put link-sharing rules (e.g. route based) at the root
   of CBQ, so that it resolves to split nodes. Then packets are classified
   by logical priority, or a more specific classifier may be attached
   to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio^sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
			goto fallback;

		if ((cl = (void*)res.class) == NULL) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL || cl->level >= head->level)
				goto fallback;
		}

#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		switch (result) {
		case TC_POLICE_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		case TC_POLICE_SHOT:
			return NULL;
		default:
			break;
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If the classifier selected a link-sharing class,
		 *	   apply the agency-specific classifier.
		 *	   Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
/*
   A packet has just been enqueued on an empty class.
   cbq_activate_class adds it to the tail of the active class list
   of its priority band.
 */

static __inline__ void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}
/*
   Unlink class from active chain.
   Note that this same procedure is done directly in cbq_dequeue*
   during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}

			cl = cl_prev->next_alive;
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
		psched_time_t now;
		psched_tdiff_t incr;

		PSCHED_GET_TIME(now);
		incr = PSCHED_TDIFF(now, q->now_rt);
		PSCHED_TADD2(q->now, incr, now);

		do {
			if (PSCHED_TLESS(cl->undertime, now)) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl=cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int len = skb->len;
	int ret;
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_POLICE
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_POLICE
	cl->q->__parent = sch;
#endif
	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->bstats.packets++;
		sch->bstats.bytes+=len;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	sch->qstats.drops++;
	cbq_mark_toplevel(q, cl);
	cl->qstats.drops++;
	return ret;
}
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int ret;

	if ((cl = q->tx_class) == NULL) {
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_CN;
	}
	q->tx_class = NULL;

	cbq_mark_toplevel(q, cl);

#ifdef CONFIG_NET_CLS_POLICE
	q->rx_class = cl;
	cl->q->__parent = sch;
#endif
	if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return 0;
	}
	sch->qstats.drops++;
	cl->qstats.drops++;
	return ret;
}
/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		   Class goes to sleep, so that it will have no
		   chance to work avgidle. Let's forgive it 8)

		   BTW cbq-2.0 has a bug in this place; apparently
		   they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		PSCHED_TADD2(q->now, delay, cl->undertime);

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	   real available rate, rather than leaf rate,
	   which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = PSCHED_TDIFF(b->undertime, q->now);
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
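/* Numerical sketch of the avgidle compensation above (values chosen for
   illustration): with ewma_log == 5 and avgidle == -3200 ticks, the
   correction subtracts 3200 - (3200>>5) == 3100 ticks from the delay,
   so a sleeping class is not additionally punished for negative avgidle
   that the sleep itself will repair. */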
/* TC_CBQ_OVL_RCLASSIC: penalize classes in the hierarchy by offtime
   when they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);

	if (!cl->delayed) {
		unsigned long sched = jiffies;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		PSCHED_TADD2(q->now, delay, cl->undertime);

		if (delay > 0) {
			sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);
			if (del_timer(&q->delay_timer) &&
			    (long)(q->delay_timer.expires - sched) > 0)
				q->delay_timer.expires = sched;
			add_timer(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}
/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = jiffies + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}
static void cbq_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}
static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	unsigned long now = jiffies;
	unsigned long sched = now;

	if (cl_prev == NULL)
		return now;

	do {
		cl = cl_prev->next_alive;
		if ((long)(now - cl->penalized) > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if ((long)(sched - cl->penalized) > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return (long)(sched - now);
}
static void cbq_undelay(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;
	struct cbq_sched_data *q = qdisc_priv(sch);
	long delay = 0;
	unsigned pmask;

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		long tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		q->delay_timer.expires = jiffies + delay;
		add_timer(&q->delay_timer);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}
#ifdef CONFIG_NET_CLS_POLICE

static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	int len = skb->len;
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		if (cl->q->enqueue(skb, cl->q) == 0) {
			sch->q.qlen++;
			sch->bstats.packets++;
			sch->bstats.bytes+=len;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif
/*
   This is a mission-critical procedure.

   We "regenerate" the toplevel cutoff if the transmitting class
   has backlog and is not regulated. This is not part of the
   original CBQ description, but it looks more reasonable.
   Probably it is wrong; this question needs further investigation.
*/

static __inline__ void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed=borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		   (now - last) is total time between packet right edges.
		   (last_pktlen/rate) is "virtual" busy time, so that

			 idle = (now - last) - last_pktlen/rate
		 */

		idle = PSCHED_TDIFF(q->now, cl->last);
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		   where W=2^{-ewma_log}. But cl->avgidle is scaled:
		   cl->avgidle == true_avgidle/W,
		   hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
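			/* Numerical sketch (illustrative values): with
			   ewma_log == 5, W == 1/32. If the scaled avgidle
			   is 640 and this packet's idle sample is 32, the
			   update yields 640 + 32 - (640>>5) == 652, i.e.
			   true_avgidle (652/32 == 20.375) moved 1/32 of
			   the way from 20 toward the sample, exactly as
			   the scaling comment above describes. */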
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			   will be allowed to send.
			   It will occur when:
			   (1-W)*true_avgidle + W*delay = 0, i.e.
			   idle = (1/W - 1)*(-true_avgidle)
			   or
			   idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			   That is not all.
			   To maintain the rate allocated to the class,
			   we add to undertime the virtual clock time
			   necessary to complete the transmitted packet.
			   (len/phys_bandwidth has already passed
			   by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			PSCHED_AUDIT_TDIFF(idle);

			PSCHED_TADD2(q->now, idle, cl->undertime);
		} else {
			/* Underlimit */

			PSCHED_SET_PASTPERFECT(cl->undertime);
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
	    !PSCHED_TLESS(q->now, cl->undertime)) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Currently the overlimit
		   action is generated for non-bounded classes
		   only if the link is completely congested.
		   Though this agrees with the ancestor-only paradigm,
		   it looks very stupid. In particular, it means that
		   this chunk of code will either never be called or
		   will result in strong amplification of burstiness.
		   Dangerous, silly, and, however, no other solution exists.
		 */
		if ((cl = cl->borrow) == NULL) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
		 PSCHED_TLESS(q->now, cl->undertime));

	cl->delayed = 0;
	return cl;
}
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				   this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			   It could occur even if cl->q->q.qlen != 0,
			   e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= skb->len;
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += skb->len;
				cl->xstats.borrows += skb->len;
#endif
			}
			q->tx_len = skb->len;

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				   Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
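/* WRR sketch for the loop above (hypothetical numbers): a class with
   quantum 1500 and deficit 1500 may dequeue a 1000-byte and then a
   600-byte packet on successive calls (deficit 500, then -100); once
   its deficit is non-positive the band rotates past it and a quantum
   is added back, so over many rounds each class averages about one
   quantum of bytes per round, with quanta scaled by weight in
   cbq_normalize_quanta(). */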
static __inline__ struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned activemask;

	activemask = q->activemask&0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	PSCHED_GET_TIME(now);
	incr = PSCHED_TDIFF(now, q->now_rt);

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		   by adding expected packet transmission time.
		   If real time is greater, we warp the artificial clock,
		   so that:

		   cbq_time = max(real_time, work);
		 */
		incr2 = L2T(&q->link, q->tx_len);
		PSCHED_TADD(q->now, incr2);
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
	}
	PSCHED_TADD(q->now, incr);
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		/* All the classes are overlimit.

		   It is possible if:

		   1. Scheduler is empty.
		   2. Toplevel cutoff inhibited borrowing.
		   3. Root class is overlimit.

		   Reset the 2nd and 3rd conditions and retry.

		   Note that NS and cbq-2.0 are buggy: peeking
		   an arbitrary class is appropriate for ancestor-only
		   sharing, but not for the toplevel algorithm.

		   Our version is better but slower, because it requires
		   two passes; this is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    PSCHED_IS_PASTPERFECT(q->link.undertime))
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		PSCHED_SET_PASTPERFECT(q->link.undertime);
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	   Sigh... start watchdog timer in the last case. */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires) {
			long delay = PSCHED_US2JIFFIE(q->wd_expires);
			if (delay <= 0)
				delay = 1;
			mod_timer(&q->wd_timer, jiffies + delay);
			sch->flags |= TCQ_F_THROTTLED;
		}
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		if ((cl = this->children) != NULL) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level+1;
	} while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned h;

	if (q->quanta[prio] == 0)
		return;

	for (h=0; h<16; h++) {
		for (cl = q->classes[h]; cl; cl = cl->next) {
			/* BUGGGG... Beware! This expression suffers from
			   arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->classid, cl->quantum);
				cl->quantum = cl->qdisc->dev->mtu/2 + 1;
			}
		}
	}
}
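/* Rough overflow bound for the quantum expression above (illustrative
   numbers): weight, allot and nclasses are multiplied before dividing,
   so weight == 1250000 (10 Mbit/s in bytes/s), allot == 1514 and
   nclasses == 3 already gives roughly 5.7e9, past the range of a 32-bit
   long; the "repaired" clamp above is what catches the resulting garbage. */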
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned h;
	int i;

	if (split == NULL)
		return;

	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
			split->defaults[i] = NULL;
	}

	for (i=0; i<=TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h=0; h<16; h++) {
			struct cbq_class *c;

			for (c = q->classes[h]; c; c = c->next) {
				if (c->split == split && c->level < level &&
				    c->defmap&(1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		if ((split = cl->split) == NULL)
			return;
		splitid = split->classid;
	}

	if (split == NULL || split->classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def&mask;
	} else
		cl->defmap = (cl->defmap&~mask)|(def&mask);

	cbq_sync_defmap(cl);
}
static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
		if (cl == this) {
			*clp = cl->next;
			cl->next = NULL;
			break;
		}
	}

	if (this->tparent) {
		clp=&this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		BUG_TRAP(this->sibling == this);
	}
}
static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	unsigned h = cbq_hash(this->classid);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	this->next = q->classes[h];
	q->classes[h] = this;

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}
static unsigned int cbq_drop(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		if ((cl_head = q->active[prio]) == NULL)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}
static void
cbq_reset(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	del_timer(&q->wd_timer);
	del_timer(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	PSCHED_GET_TIME(q->now);
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < 16; h++) {
		for (cl = q->classes[h]; cl; cl = cl->next) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			PSCHED_SET_PASTPERFECT(cl->undertime);
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}

static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change&TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change&TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change&TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change&TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change&TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority-1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO-1;
	}

	cbq_addprio(q, cl);
	return 0;
}
static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2-1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2-1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = (ovl->penalty*HZ)/1000;
	return 0;
}
#ifdef CONFIG_NET_CLS_POLICE
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_CBQ_MAX];
	struct tc_ratespec *r;

	if (rtattr_parse_nested(tb, TCA_CBQ_MAX, opt) < 0 ||
	    tb[TCA_CBQ_RTAB-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec))
		return -EINVAL;

	if (tb[TCA_CBQ_LSSOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt))
		return -EINVAL;

	r = RTA_DATA(tb[TCA_CBQ_RATE-1]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB-1])) == NULL)
		return -EINVAL;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.classid = sch->handle;
	q->link.qdisc = sch;
	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO-1;
	q->link.priority2 = TC_CBQ_MAXPRIO-1;
	q->link.cpriority = TC_CBQ_MAXPRIO-1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(sch->dev);
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;
	q->link.stats_lock = &sch->dev->queue_lock;

	init_timer(&q->wd_timer);
	q->wd_timer.data = (unsigned long)sch;
	q->wd_timer.function = cbq_watchdog;
	init_timer(&q->delay_timer);
	q->delay_timer.data = (unsigned long)sch;
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	PSCHED_GET_TIME(q->now);
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT-1])
		cbq_set_lss(&q->link, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));

	cbq_addprio(q, &q->link);
	return 0;
}
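/* A typical userspace invocation that exercises this init path
   (illustrative tc(8) parameters, not defaults):

     tc qdisc add dev eth0 root handle 1: cbq \
        bandwidth 100Mbit avpkt 1000 cell 8

   tc computes the rate table (TCA_CBQ_RTAB) and rate spec (TCA_CBQ_RATE)
   that cbq_init() requires above, and may also pass TCA_CBQ_LSSOPT,
   which lands in cbq_set_lss(). */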
static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;

	RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	RTA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_wrropt opt;

	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority+1;
	opt.cpriority = cl->cpriority+1;
	opt.weight = cl->weight;
	RTA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2+1;
	opt.pad = 0;
	opt.penalty = (cl->penalty*1000)/HZ;
	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		RTA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
	}
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

#ifdef CONFIG_NET_CLS_POLICE
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb->tail;
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
	}
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_POLICE
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto rtattr_failure;
	rta->rta_len = skb->tail - b;
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class*)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	tcm->tcm_info = cl->q->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (cbq_dump_attr(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb->tail - b;
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (!PSCHED_IS_PASTPERFECT(cl->undertime))
		cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (cl) {
		if (new == NULL) {
			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL)
				return -ENOBUFS;
		} else {
#ifdef CONFIG_NET_CLS_POLICE
			if (cl->police == TC_POLICE_RECLASSIFY)
				new->reshape_fail = cbq_reshape_fail;
#endif
		}
		sch_tree_lock(sch);
		*old = cl->q;
		cl->q = new;
		sch->q.qlen -= (*old)->q.qlen;
		qdisc_reset(*old);
		sch_tree_unlock(sch);

		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *
cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	return cl ? cl->q : NULL;
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}
static void cbq_destroy_filters(struct cbq_class *cl)
{
	struct tcf_proto *tp;

	while ((tp = cl->filter_list) != NULL) {
		cl->filter_list = tp->next;
		tcf_destroy(tp);
	}
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	BUG_TRAP(!cl->filters);

	cbq_destroy_filters(cl);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
	if (cl != &q->link)
		kfree(cl);
}
static void
cbq_destroy(struct Qdisc* sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned h;

#ifdef CONFIG_NET_CLS_POLICE
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < 16; h++)
		for (cl = q->classes[h]; cl; cl = cl->next)
			cbq_destroy_filters(cl);

	for (h = 0; h < 16; h++) {
		struct cbq_class *next;

		for (cl = q->classes[h]; cl; cl = next) {
			next = cl->next;
			cbq_destroy_class(sch, cl);
		}
	}
}
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(&sch->dev->queue_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(&sch->dev->queue_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)*arg;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_CBQ_MAX];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt==NULL || rtattr_parse_nested(tb, TCA_CBQ_MAX, opt))
		return -EINVAL;

	if (tb[TCA_CBQ_OVL_STRATEGY-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_OVL_STRATEGY-1]) < sizeof(struct tc_cbq_ovl))
		return -EINVAL;

	if (tb[TCA_CBQ_FOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_FOPT-1]) < sizeof(struct tc_cbq_fopt))
		return -EINVAL;

	if (tb[TCA_CBQ_RATE-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec))
		return -EINVAL;

	if (tb[TCA_CBQ_LSSOPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt))
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
		return -EINVAL;

#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_CBQ_POLICE-1] &&
	    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
		return -EINVAL;
#endif

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent && cl->tparent->classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE-1]) {
			rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]);
			if (rtab == NULL)
				return -EINVAL;
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			rtab = xchg(&cl->R_tab, rtab);
			qdisc_put_rtab(rtab);
		}

		if (tb[TCA_CBQ_LSSOPT-1])
			cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));

		if (tb[TCA_CBQ_WRROPT-1]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY-1])
			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));

#ifdef CONFIG_NET_CLS_POLICE
		if (tb[TCA_CBQ_POLICE-1])
			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif

		if (tb[TCA_CBQ_FOPT-1])
			cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
				cl->stats_lock, tca[TCA_RATE-1]);
#endif
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL ||
	    tb[TCA_CBQ_LSSOPT-1] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle,0x8000);

		for (i=0; i<0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;
	memset(cl, 0, sizeof(*cl));
	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
		cl->q = &noop_qdisc;
	cl->classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;
	cl->stats_lock = &sch->dev->queue_lock;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));
	cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1]));
	if (cl->ewma_log==0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle==0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt==0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY-1])
		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_CBQ_POLICE-1])
		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
	if (tb[TCA_CBQ_FOPT-1])
		cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
	sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
			cl->stats_lock, tca[TCA_RATE-1]);
#endif

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_POLICE
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	if (--cl->refcnt == 0)
		cbq_destroy_class(sch, cl);

	return 0;
}
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class*)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class*)arg;

	cl->filters--;
}
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	unsigned h;

	if (arg->stop)
		return;

	for (h = 0; h < 16; h++) {
		struct cbq_class *cl;

		for (cl = q->classes[h]; cl; cl = cl->next) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.requeue	=	cbq_requeue,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");