/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length: it should be chosen larger than
	qth_max so as to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen arbitrarily
	high (well, less than RAM size). In practice this limit is
	never reached if RED is working correctly.
 */
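
/*
 * For illustration only: these parameters are normally set from
 * userspace with iproute2's tc. A plausible invocation (the device
 * name and all numeric values below are arbitrary examples, not
 * recommendations) looks like:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn
 *
 * tc translates min/max/avpkt/burst/probability/bandwidth into the
 * qth_min/qth_max/Wlog/Plog/Scell_log fields of struct tc_red_qopt
 * consumed by red_change() below.
 */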
struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED state and parameters */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* attached child queue */
};
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
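
/*
 * A sketch of the policy implemented by red_action() in net/red.h, for
 * readers unfamiliar with RED: the average queue length qavg is an
 * EWMA of the instantaneous backlog (weighted by 2^-Wlog). While
 * qavg < qth_min nothing is marked; between qth_min and qth_max a
 * packet is marked/dropped with a probability that grows roughly
 * linearly towards max_P (RED_PROB_MARK); at or above qth_max every
 * packet is marked/dropped (RED_HARD_MARK). With TC_RED_ECN set,
 * ECN-capable packets are marked via INET_ECN_set_ce() instead of
 * being dropped, unless TC_RED_HARDDROP forces drops past qth_max.
 */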
static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}
static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch->dev, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
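
/*
 * For reference, a sketch (not taken verbatim from the headers) of the
 * netlink attribute layout red_change() expects inside TCA_OPTIONS:
 *
 *	TCA_RED_PARMS	struct tc_red_qopt { limit, qth_min, qth_max,
 *			Wlog, Plog, Scell_log, flags }
 *	TCA_RED_STAB	RED_STAB_SIZE (256) bytes of precomputed
 *			idle-time lookup table used by the qavg estimator
 *
 * Both attributes are mandatory; their absence or a short payload is
 * rejected with -EINVAL above.
 */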
static int red_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
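
/*
 * These xstats surface in userspace via "tc -s qdisc show"; the
 * counter names printed by tc (output shape from memory, not from this
 * source) map onto the fields above roughly as:
 *
 *	marked N early N pdrop N other N
 *
 * where "early" aggregates probabilistic and forced RED drops, and
 * "pdrop" counts packets the child queue dropped at the hard limit.
 */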
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}
/* The single pseudo-class is always valid; 1 serves as its handle. */
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
	return;
}

/* Classes cannot be created or deleted on a RED qdisc. */
static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}
static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");