/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:     - Bug fix with grio mode
 *	       - a better single AvgQ mode with Grio (WRED)
 *	       - A finer grained VQ dequeue based on a suggestion
 *
 * For all the glorious comments look at include/net/red.h
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO	(MAX_DPs / 2)
#define GRED_VQ_MASK	(MAX_DPs - 1)
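/* Each packet carries its virtual queue (DP) index in the low bits of
 * skb->tc_index. MAX_DPs is assumed to be a power of two here, so the
 * index can be extracted with a single mask, e.g.:
 *
 *	u16 dp = skb->tc_index & GRED_VQ_MASK;
 */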
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters		*/
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ	*/
	u8		prio;		/* the prio of this vq		*/

	struct red_parms parms;
	struct red_stats stats;
};
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;	/* shared avg. queue state in WRED mode */
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
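/* Report whether any two virtual queues share the same priority. The
 * callers use this to decide whether to switch into WRED mode, where a
 * single shared average queue estimate (wred_set) is used instead of a
 * per-VQ average.
 */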
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't happen too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}
static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
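/* Enqueue: map the skb to its virtual queue, update the RED average
 * (in RIO mode the averages of VQs with a smaller prio value are
 * summed in first), then mark with ECN or drop according to
 * red_action(). Packets that still fit under q->limit are queued.
 */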
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}
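/* Dequeue from the shared backing queue and charge the packet back to
 * its virtual queue. An emptied VQ (or, in WRED mode, the whole qdisc)
 * starts a RED idle period so that the average queue estimate can
 * decay (see include/net/red.h).
 */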
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
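/* Resize the DP table. Only the update of DPs/def/red_flags needs the
 * qdisc tree lock; once DPs has shrunk, out-of-range VQs can no longer
 * be reached from any entry point, so they are destroyed afterwards as
 * "shadowed" VQs.
 */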
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
};
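/* Two flavours of change request are distinguished below: a table-wide
 * update carrying only TCA_GRED_DPS is handed to
 * gred_change_table_def(), while a per-VQ update must carry both
 * TCA_GRED_PARMS and TCA_GRED_STAB.
 */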
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
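/* Dump always emits MAX_DPs parameter sets; a VQ that does not exist
 * is flagged to userspace by reporting a DP value of MAX_DPs + i (see
 * the hack note below).
 */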
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.requeue	= gred_requeue,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)
MODULE_LICENSE("GPL");