/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#if 1 /* control */
#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define DPRINTK(format, args...)
#endif

#if 0 /* data */
#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define D2PRINTK(format, args...)
#endif
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
struct gred_sched_data;

struct gred_sched_data
{
        u32             limit;          /* HARD maximal queue length       */
        u32             DP;             /* the drop parameters             */
        u32             bytesin;        /* bytes seen on virtualQ so far   */
        u32             packetsin;      /* packets seen on virtualQ so far */
        u32             backlog;        /* bytes on the virtualQ           */
        u8              prio;           /* the prio of this vq             */

        struct red_parms parms;
        struct red_stats stats;
};
enum {
        GRED_WRED_MODE = 1,
        GRED_RIO_MODE,
};

struct gred_sched
{
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             DPs;
        u32             def;
        struct red_parms wred_set;
};
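/*
 * Two table-wide operating modes are tracked in gred_sched.flags:
 * RIO mode means the virtual queues carry individual priorities, and
 * WRED mode means at least two virtual queues share a priority so a
 * single average queue (wred_set) is maintained across them.  The
 * helpers below just set, clear and test those flag bits.
 */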
static inline int gred_wred_mode(struct gred_sched *table)
{
        return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
        __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
        __clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
        return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
        __set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
        __clear_bit(GRED_RIO_MODE, &table->flags);
}
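/*
 * WRED mode is only meaningful when two or more virtual queues share
 * the same priority: gred_wred_mode_check() reports whether such a
 * duplicate priority exists so the caller can enable WRED mode.
 */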
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i, n;

        /* Really ugly O(n^2) but shouldn't be necessary too frequent. */
        for (i = 0; i < table->DPs; i++) {
                struct gred_sched_data *q = table->tab[i];

                if (q == NULL)
                        continue;

                for (n = 0; n < table->DPs; n++)
                        if (table->tab[n] && table->tab[n] != q &&
                            table->tab[n]->prio == q->prio)
                                return 1;
        }

        return 0;
}
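/*
 * In WRED mode the average is computed over the backlog of the whole
 * qdisc; otherwise each virtual queue tracks its own byte backlog.
 */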
static inline unsigned int gred_backlog(struct gred_sched *table,
                                        struct gred_sched_data *q,
                                        struct Qdisc *sch)
{
        if (gred_wred_mode(table))
                return sch->qstats.backlog;
        else
                return q->backlog;
}
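/* The low order bits of skb->tc_index select the virtual queue (DP). */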
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
        return skb->tc_index & GRED_VQ_MASK;
}
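/*
 * In WRED mode all participating virtual queues share one set of RED
 * state: it is copied into the per-VQ parms before the average is
 * updated and written back afterwards.
 */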
static inline void gred_load_wred_set(struct gred_sched *table,
                                      struct gred_sched_data *q)
{
        q->parms.qavg = table->wred_set.qavg;
        q->parms.qidlestart = table->wred_set.qidlestart;
}
static inline void gred_store_wred_set(struct gred_sched *table,
                                       struct gred_sched_data *q)
{
        table->wred_set.qavg = q->parms.qavg;
}
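/*
 * Enqueue path: map the packet to its virtual queue, update the RED
 * average (shared in WRED mode; in RIO mode the averages of VQs with a
 * lower prio value are added in) and then let red_action() decide
 * between queueing, a probabilistic drop or a forced drop.
 */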
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct gred_sched_data *q = NULL;
        struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;

                if ((q = t->tab[dp]) == NULL) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
                        if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
                }

                /* fix tc_index? --could be controversial but needed for
                   requeueing */
                skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
        }

        /* sum up the qavg of all lower prio VQs to get the new qavg */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;

                for (i = 0; i < t->DPs; i++) {
                        if ((!t->tab[i]) || (i == q->DP))
                                continue;

                        if (t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->parms))
                                qavg += t->tab[i]->parms.qavg;
                }
        }

        q->packetsin++;
        q->bytesin += skb->len;

        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);

        q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        if (gred_wred_mode(t))
                gred_store_wred_set(t, q);

        switch (red_action(&q->parms, q->parms.qavg + qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                sch->qstats.overlimits++;
                q->stats.prob_drop++;
                goto congestion_drop;

        case RED_HARD_MARK:
                sch->qstats.overlimits++;
                q->stats.forced_drop++;
                goto congestion_drop;
        }

        if (q->backlog + skb->len <= q->limit) {
                q->backlog += skb->len;
                return qdisc_enqueue_tail(skb, sch);
        }

        q->stats.pdrop++;
drop:
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}
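/*
 * Requeue puts the packet back at the head of the shared queue; the
 * per-VQ byte backlog is credited again so the accounting stays
 * consistent with what dequeue removed.
 */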
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct gred_sched *t = qdisc_priv(sch);
        struct gred_sched_data *q;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                if (net_ratelimit())
                        printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
                               "for requeue, screwing up backlog.\n",
                               tc_index_to_dp(skb));
        } else {
                if (red_is_idling(&q->parms))
                        red_end_of_idle_period(&q->parms);

                q->backlog += skb->len;
        }

        return qdisc_requeue(skb, sch);
}
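/*
 * Dequeue from the shared queue, charge the packet against its VQ and
 * start the RED idle period once that VQ (or, in WRED mode, the whole
 * qdisc) runs empty.
 */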
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched_data *q;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb) {
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "GRED: Unable to relocate "
                                       "VQ 0x%x after dequeue, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= skb->len;

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                return skb;
        }

        if (gred_wred_mode(t))
                red_start_of_idle_period(&t->wred_set);

        return NULL;
}
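/*
 * ->drop removes the packet at the tail, i.e. the most recently queued
 * one, and reports the number of bytes freed.
 */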
static unsigned int gred_drop(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched_data *q;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_tail(sch);
        if (skb) {
                unsigned int len = skb->len;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "GRED: Unable to relocate "
                                       "VQ 0x%x while dropping, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                qdisc_drop(skb, sch);
                return len;
        }

        if (gred_wred_mode(t))
                red_start_of_idle_period(&t->wred_set);

        return 0;
}
static void gred_reset(struct Qdisc *sch)
{
        int i;
        struct gred_sched_data *q;
        struct gred_sched *t = qdisc_priv(sch);

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                q = t->tab[i];
                if (!q)
                        continue;

                red_restart(&q->parms);
                q->backlog = 0;
        }
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
        kfree(q);
}
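/*
 * TCA_GRED_DPS handling: resize the DP table, pick the operating mode
 * from sopt->grio and free any virtual queues that are now shadowed,
 * i.e. lie beyond the new number of DPs.
 */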
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_sopt *sopt;
        int i;

        if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
                return -EINVAL;

        sopt = RTA_DATA(dps);

        if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
                return -EINVAL;

        sch_tree_lock(sch);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;

        /*
         * Every entry point to GRED is synchronized with the above code
         * and the DP is checked against DPs, i.e. shadowed VQs can no
         * longer be found so we can unlock right here.
         */
        sch_tree_unlock(sch);

        if (sopt->grio) {
                gred_enable_rio_mode(table);
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        } else {
                gred_disable_rio_mode(table);
                gred_disable_wred_mode(table);
        }

        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
                        printk(KERN_WARNING "GRED: Warning: Destroying "
                               "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        return 0;
}
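/*
 * Create (if needed) and parameterize a single virtual queue from a
 * tc_gred_qopt plus the 256 byte stab table used by the RED code.
 */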
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q;

        if (table->tab[dp] == NULL) {
                table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
                if (table->tab[dp] == NULL)
                        return -ENOMEM;
                memset(table->tab[dp], 0, sizeof(*q));
        }

        q = table->tab[dp];
        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->parms);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab);

        return 0;
}
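/*
 * TCA_OPTIONS handling: either a bare table definition (no PARMS/STAB
 * attributes), which is passed on to gred_change_table_def(), or the
 * parameters for one DP, in which case a still unconfigured default DP
 * inherits the same setup.
 */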
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
        struct rtattr *tb[TCA_GRED_MAX];
        int err = -EINVAL, prio = GRED_DEF_PRIO;
        u8 *stab;

        if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
                return -EINVAL;

        if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
                return gred_change_table_def(sch, opt);

        if (tb[TCA_GRED_PARMS-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
            tb[TCA_GRED_STAB-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
                return -EINVAL;

        ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
        stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

        if (ctl->DP >= table->DPs)
                goto errout;

        if (gred_rio_mode(table)) {
                if (ctl->prio == 0) {
                        int def_prio = GRED_DEF_PRIO;

                        if (table->tab[table->def])
                                def_prio = table->tab[table->def]->prio;

                        printk(KERN_DEBUG "GRED: DP %u does not have a prio "
                               "setting default to %d\n", ctl->DP, def_prio);

                        prio = def_prio;
                } else
                        prio = ctl->prio;
        }

        sch_tree_lock(sch);

        err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
        if (err < 0)
                goto errout_locked;

        if (table->tab[table->def] == NULL) {
                if (gred_rio_mode(table))
                        prio = table->tab[ctl->DP]->prio;

                err = gred_change_vq(sch, table->def, ctl, prio, stab);
                if (err < 0)
                        goto errout_locked;
        }

        if (gred_rio_mode(table)) {
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        }

        err = 0;

errout_locked:
        sch_tree_unlock(sch);
errout:
        return err;
}
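/* Qdisc setup: only a table definition (TCA_GRED_DPS) is accepted here. */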
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct rtattr *tb[TCA_GRED_MAX];

        if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
                return -EINVAL;

        if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
                return -EINVAL;

        return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
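/*
 * Dump the table definition followed by one tc_gred_qopt per possible
 * DP; unconfigured DPs are flagged by reporting a DP number of
 * MAX_DPs + i so tc can tell them apart.
 */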
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct rtattr *parms, *opts = NULL;
        int i;
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
                .grio   = gred_rio_mode(table),
        };

        opts = RTA_NEST(skb, TCA_OPTIONS);
        RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
        parms = RTA_NEST(skb, TCA_GRED_PARMS);

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct tc_gred_qopt opt;

                memset(&opt, 0, sizeof(opt));

                if (!q) {
                        /* hack -- fix at some point with proper message
                           This is how we indicate to tc that there is no VQ
                           at this DP */
                        opt.DP = MAX_DPs + i;
                        goto append_opt;
                }

                opt.limit       = q->limit;
                opt.DP          = q->DP;
                opt.backlog     = q->backlog;
                opt.prio        = q->prio;
                opt.qth_min     = q->parms.qth_min >> q->parms.Wlog;
                opt.qth_max     = q->parms.qth_max >> q->parms.Wlog;
                opt.Wlog        = q->parms.Wlog;
                opt.Plog        = q->parms.Plog;
                opt.Scell_log   = q->parms.Scell_log;
                opt.other       = q->stats.other;
                opt.early       = q->stats.prob_drop;
                opt.forced      = q->stats.forced_drop;
                opt.pdrop       = q->stats.pdrop;
                opt.packets     = q->packetsin;
                opt.bytesin     = q->bytesin;

                if (gred_wred_mode(table)) {
                        q->parms.qidlestart =
                                table->tab[table->def]->parms.qidlestart;
                        q->parms.qavg = table->tab[table->def]->parms.qavg;
                }

                opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
                RTA_APPEND(skb, sizeof(opt), &opt);
        }

        RTA_NEST_END(skb, parms);

        return RTA_NEST_END(skb, opts);

rtattr_failure:
        return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        for (i = 0; i < table->DPs; i++) {
                if (table->tab[i])
                        gred_destroy_vq(table->tab[i]);
        }
}
static struct Qdisc_ops gred_qdisc_ops = {
        .id             = "gred",
        .priv_size      = sizeof(struct gred_sched),
        .enqueue        = gred_enqueue,
        .dequeue        = gred_dequeue,
        .requeue        = gred_requeue,
        .drop           = gred_drop,
        .init           = gred_init,
        .reset          = gred_reset,
        .destroy        = gred_destroy,
        .change         = gred_change,
        .dump           = gred_dump,
        .owner          = THIS_MODULE,
};
static int __init gred_module_init(void)
{
        return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
        unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");