/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129:	- Bug fix with grio mode
 *			- a better single AvgQ mode with Grio (WRED)
 *			- A finer grained VQ dequeue based on a suggestion
 *			  from Ren Liu
 *			- More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO	(MAX_DPs / 2)
#define GRED_VQ_MASK	(MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;
struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};
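
/* Operating modes: in RIO mode every virtual queue (VQ) runs RED with
 * its own average queue length; WRED mode is entered on top of RIO
 * whenever two or more VQs share the same priority, in which case one
 * shared average (table->wred_set) is used for all of them.
 */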
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
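
/* Returns 1 if at least two distinct virtual queues share a priority,
 * i.e. the table qualifies for WRED mode.
 */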
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
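
/* The low-order bits of skb->tc_index select the virtual queue (DP);
 * MAX_DPs is a power of two, so GRED_VQ_MASK extracts exactly those bits.
 */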
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
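
/* In WRED mode the shared average lives in table->wred_set: it is
 * loaded into the VQ before the RED calculation and stored back
 * afterwards, so every VQ sees the same qavg.
 */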
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
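
/* Enqueue: map the packet to its virtual queue via tc_index (falling
 * back to the default DP), update the average queue length, and let
 * the RED state machine decide whether to queue, mark or drop.
 */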
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios below ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
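
/* (Re)configure a single virtual queue. The VQ is allocated lazily the
 * first time its DP is configured; later changes reuse the allocation.
 */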
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");