/*
 * net/sched/sch_gred.c Generic Random Early Detection queue.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *              991129: - Bug fix with grio mode
 *                      - a better single AvgQ mode with Grio (WRED)
 *                      - A finer grained VQ dequeue based on a suggestion
 *                        from Ren Liu
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#if 1 /* control path messages */
#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define DPRINTK(format, args...)
#endif

#if 0 /* per-packet (data path) messages */
#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define D2PRINTK(format, args...)
#endif
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
    /* Parameters */
    u32 limit;     /* HARD maximal queue length */
    u32 qth_min;   /* Min average length threshold, scaled by 2^Wlog */
    u32 qth_max;   /* Max average length threshold, scaled by 2^Wlog */
    u32 DP;        /* the drop parameters (virtual queue index) */
    char Wlog;     /* log(W) */
    char Plog;     /* random number bits */
    u32 Scell_max; /* cap on the idle time fed into Stab[] */
    u32 Rmask;     /* mask for the cached random number, (2^Plog)-1 */
    u32 bytesin;   /* bytes seen on virtualQ so far */
    u32 packetsin; /* packets seen on virtualQ so far */
    u32 backlog;   /* bytes on the virtualQ */
    u32 forced;    /* packets dropped for exceeding limits */
    u32 early;     /* packets dropped as a warning */
    u32 other;     /* packets dropped by invoking drop() */
    u32 pdrop;     /* packets dropped because we exceeded physical queue limits */
    char Scell_log;
    u8 Stab[256];  /* idle-time decay table */
    u8 prio;       /* the prio of this vq */

    /* Variables */
    unsigned long qave;       /* Average queue length, scaled by 2^Wlog */
    int qcount;               /* Packets since last random number generation */
    u32 qR;                   /* Cached random number */

    psched_time_t qidlestart; /* Start of idle period */
};
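/*
 * One RED parameter/state set ("virtual queue", VQ) is kept per drop
 * parameter set (DP); the low four bits of skb->tc_index select the VQ a
 * packet is charged to.  GRED_WRED_MODE is enabled when two VQs share the
 * same priority, in which case all VQs effectively follow one shared
 * average queue estimate (WRED behaviour).
 */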
enum {
    GRED_WRED_MODE = 1,
};

struct gred_sched {
    struct gred_sched_data *tab[MAX_DPs];
    unsigned long flags;
    u32 DPs;    /* number of configured drop parameter sets */
    u32 def;    /* default DP */
    u8 initd;   /* set once a VQ has been fully configured */
    u8 grio;    /* priority-aware (RIO-like) mode */
};
static inline int gred_wred_mode(struct gred_sched *table)
{
    return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
    __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
    __clear_bit(GRED_WRED_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
    struct gred_sched *table = qdisc_priv(sch);
    int i;

    /* Really ugly O(n^2), but it shouldn't need to run frequently. */
    for (i = 0; i < table->DPs; i++) {
        struct gred_sched_data *q = table->tab[i];
        int n;

        if (q == NULL)
            continue;

        for (n = 0; n < table->DPs; n++)
            if (table->tab[n] && table->tab[n] != q &&
                table->tab[n]->prio == q->prio)
                return 1;
    }

    return 0;
}
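/*
 * Enqueue follows classic RED on the selected VQ: update the average queue
 * length estimate, then always queue below qth_min, always drop at or above
 * qth_max, and drop with increasing probability in between.  With grio, the
 * averages of higher-priority VQs are added in, so lower priorities see an
 * inflated average and back off first.
 */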
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    psched_time_t now;
    struct gred_sched_data *q = NULL;
    struct gred_sched *t = qdisc_priv(sch);
    unsigned long qave = 0;
    int i = 0;

    if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
        D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
        goto do_enqueue;
    }
    if (((skb->tc_index & 0xf) > (t->DPs - 1)) || !(q = t->tab[skb->tc_index & 0xf])) {
        printk("GRED: setting to default (%d)\n", t->def);
        if (!(q = t->tab[t->def])) {
            DPRINTK("GRED: setting to default FAILED! dropping!! "
                    "(%d)\n", t->def);
            goto drop;
        }
        /* fix tc_index? --could be controversial but needed for
           requeueing */
        skb->tc_index = (skb->tc_index & 0xfffffff0) | t->def;
    }
    D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
             "general backlog %d\n", skb->tc_index & 0xf, sch->handle,
             q->backlog, sch->qstats.backlog);

    /* sum up the qaves of all non-idle VQs with higher priority (lower
       prio value) than ours to get the new qave */
    if (!gred_wred_mode(t) && t->grio) {
        for (i = 0; i < t->DPs; i++) {
            if ((!t->tab[i]) || (i == q->DP))
                continue;

            if ((t->tab[i]->prio < q->prio) &&
                (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
                qave += t->tab[i]->qave;
        }
    }
    q->packetsin++;
    q->bytesin += skb->len;

    if (gred_wred_mode(t)) {
        qave = 0;
        q->qave = t->tab[t->def]->qave;
        q->qidlestart = t->tab[t->def]->qidlestart;
    }
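    /*
     * Update the average queue length.  qave is kept scaled by 2^Wlog so
     * that the EWMA avg += W * (backlog - avg), with W = 2^-Wlog, reduces
     * to integer adds and shifts.  After an idle period the average is
     * instead decayed via the precomputed Stab[] shift table, which
     * approximates (1-W)^(idle_time / 2^Scell_log).
     */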
    if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
        long us_idle;

        PSCHED_GET_TIME(now);
        us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
        PSCHED_SET_PASTPERFECT(q->qidlestart);

        q->qave >>= q->Stab[(us_idle >> q->Scell_log) & 0xFF];
    } else {
        if (gred_wred_mode(t)) {
            q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
        } else {
            q->qave += q->backlog - (q->qave >> q->Wlog);
        }
    }
    if (gred_wred_mode(t))
        t->tab[t->def]->qave = q->qave;
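    /*
     * Classic RED decision on the (scaled) average: below qth_min always
     * queue, at or above qth_max always drop ("forced"), and in between
     * drop with a probability that grows with both the average and the
     * number of packets accepted since the last random drop ("early").
     */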
    if ((q->qave + qave) < q->qth_min) {
        q->qcount = -1;
enqueue:
        if (q->backlog + skb->len <= q->limit) {
            q->backlog += skb->len;
do_enqueue:
            __skb_queue_tail(&sch->q, skb);
            sch->qstats.backlog += skb->len;
            sch->bstats.bytes += skb->len;
            sch->bstats.packets++;
            return 0;
        } else {
            q->pdrop++;
        }

drop:
        kfree_skb(skb);
        sch->qstats.drops++;
        return NET_XMIT_DROP;
    }
    if ((q->qave + qave) >= q->qth_max) {
        q->qcount = -1;
        sch->qstats.overlimits++;
        q->forced++;
        goto drop;
    }

    if (++q->qcount) {
        if ((((qave + q->qave) - q->qth_min) >> q->Wlog) * q->qcount < q->qR)
            goto enqueue;
        q->qcount = 0;
        q->qR = net_random() & q->Rmask;
        sch->qstats.overlimits++;
        q->early++;
        goto drop;
    }

    q->qR = net_random() & q->Rmask;
    goto enqueue;
}
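/*
 * A requeued packet goes back to the head of the device queue; the VQ is
 * clearly not idle, so cancel any pending idle period before restoring the
 * backlog counters.
 */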
static int
gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct gred_sched_data *q;
    struct gred_sched *t = qdisc_priv(sch);

    q = t->tab[(skb->tc_index & 0xf)];
    /* error checking here -- probably unnecessary */
    PSCHED_SET_PASTPERFECT(q->qidlestart);

    __skb_queue_head(&sch->q, skb);
    sch->qstats.backlog += skb->len;
    sch->qstats.requeues++;
    q->backlog += skb->len;
    return 0;
}
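/*
 * On dequeue, a VQ that drains to empty (outside of WRED mode, where only
 * the shared default VQ matters) records the current time so the next
 * enqueue can decay its average over the idle period.
 */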
static struct sk_buff *
gred_dequeue(struct Qdisc *sch)
{
    struct sk_buff *skb;
    struct gred_sched_data *q;
    struct gred_sched *t = qdisc_priv(sch);

    skb = __skb_dequeue(&sch->q);
    if (skb) {
        sch->qstats.backlog -= skb->len;
        q = t->tab[(skb->tc_index & 0xf)];
        if (q) {
            q->backlog -= skb->len;
            if (!q->backlog && !gred_wred_mode(t))
                PSCHED_GET_TIME(q->qidlestart);
        } else {
            D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
                     skb->tc_index & 0xf);
        }
        return skb;
    }

    if (gred_wred_mode(t)) {
        q = t->tab[t->def];
        if (!q)
            D2PRINTK("no default VQ set: Results will be "
                     "screwed up\n");
        else
            PSCHED_GET_TIME(q->qidlestart);
    }

    return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
    struct sk_buff *skb;
    struct gred_sched_data *q;
    struct gred_sched *t = qdisc_priv(sch);

    skb = __skb_dequeue_tail(&sch->q);
    if (skb) {
        unsigned int len = skb->len;

        sch->qstats.backlog -= len;
        sch->qstats.drops++;
        q = t->tab[(skb->tc_index & 0xf)];
        if (q) {
            q->backlog -= len;
            q->other++;
            if (!q->backlog && !gred_wred_mode(t))
                PSCHED_GET_TIME(q->qidlestart);
        } else {
            D2PRINTK("gred_drop: skb has bad tcindex %x\n",
                     skb->tc_index & 0xf);
        }

        kfree_skb(skb);
        return len;
    }

    q = t->tab[t->def];
    if (!q) {
        D2PRINTK("no default VQ set: Results might be screwed up\n");
        return 0;
    }

    PSCHED_GET_TIME(q->qidlestart);
    return 0;
}
static void gred_reset(struct Qdisc *sch)
{
    int i;
    struct gred_sched_data *q;
    struct gred_sched *t = qdisc_priv(sch);

    __skb_queue_purge(&sch->q);

    sch->qstats.backlog = 0;

    for (i = 0; i < t->DPs; i++) {
        q = t->tab[i];
        if (!q)
            continue;

        PSCHED_SET_PASTPERFECT(q->qidlestart);
        q->qave = 0;
        q->qcount = -1;
        q->backlog = 0;
        q->other = 0;
        q->forced = 0;
        q->pdrop = 0;
        q->early = 0;
    }
}
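/*
 * gred_change() accepts two netlink layouts: a table-wide TCA_GRED_DPS
 * option that (re)configures the number of DPs, the default DP and grio
 * mode, or a per-VQ TCA_GRED_PARMS + TCA_GRED_STAB pair that configures a
 * single virtual queue.
 */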
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
    struct gred_sched *table = qdisc_priv(sch);
    struct gred_sched_data *q;
    struct tc_gred_qopt *ctl;
    struct tc_gred_sopt *sopt;
    struct rtattr *tb[TCA_GRED_STAB];
    struct rtattr *tb2[TCA_GRED_DPS];

    if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
        return -EINVAL;

    if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
        rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);

        if (tb2[TCA_GRED_DPS-1] == 0)
            return -EINVAL;

        sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;

        if (sopt->grio) {
            table->grio = 1;
            gred_disable_wred_mode(table);
            if (gred_wred_mode_check(sch))
                gred_enable_wred_mode(table);
        } else {
            table->grio = 0;
            gred_disable_wred_mode(table);
        }

        table->initd = 0;
        /* probably need to clear all the table DP entries as well */
        return 0;
    }
    if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
        RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
        RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
        return -EINVAL;

    ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
    if (ctl->DP > MAX_DPs - 1) {
        /* misbehaving is punished! Fall back to the default drop
           parameter set */
        DPRINTK("\nGRED: DP %u not in the proper range, fixed. New DP "
                "set to default at %d\n", ctl->DP, table->def);
        ctl->DP = table->def;
    }

    if (table->tab[ctl->DP] == NULL) {
        table->tab[ctl->DP] = kmalloc(sizeof(struct gred_sched_data),
                                      GFP_KERNEL);
        if (NULL == table->tab[ctl->DP])
            return -ENOMEM;
        memset(table->tab[ctl->DP], 0, sizeof(struct gred_sched_data));
    }

    q = table->tab[ctl->DP];
    if (table->grio) {
        if (ctl->prio <= 0) {
            if (table->def && table->tab[table->def]) {
                DPRINTK("\nGRED: DP %u does not have a prio, "
                        "setting default to %d\n", ctl->DP,
                        table->tab[table->def]->prio);
                q->prio = table->tab[table->def]->prio;
            } else {
                DPRINTK("\nGRED: DP %u does not have a prio, "
                        "setting default to 8\n", ctl->DP);
                q->prio = 8;
            }
        } else {
            q->prio = ctl->prio;
        }
    } else {
        q->prio = 8;
    }
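    /*
     * qth_min/qth_max are stored pre-scaled by 2^Wlog so the enqueue fast
     * path can compare them directly against the scaled qave.  Rmask
     * limits the cached random number to Plog bits, which bounds the
     * maximum early-drop probability.
     */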
    q->DP = ctl->DP;
    q->Wlog = ctl->Wlog;
    q->Plog = ctl->Plog;
    q->limit = ctl->limit;
    q->Scell_log = ctl->Scell_log;
    q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
    q->Scell_max = (255 << q->Scell_log);
    q->qth_min = ctl->qth_min << ctl->Wlog;
    q->qth_max = ctl->qth_max << ctl->Wlog;
    q->qave = 0;
    q->backlog = 0;
    q->qcount = -1;
    q->other = 0;
    q->forced = 0;
    q->pdrop = 0;
    q->early = 0;

    PSCHED_SET_PASTPERFECT(q->qidlestart);
    memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);

    if (table->initd && table->grio) {
        /* this looks ugly but it's not in the fast path */
        gred_disable_wred_mode(table);
        if (gred_wred_mode_check(sch))
            gred_enable_wred_mode(table);
    }
    if (!table->initd) {
        table->initd = 1;
        /*
         * the first entry also goes into the default until
         * over-written
         */
        if (table->tab[table->def] == NULL) {
            table->tab[table->def] =
                kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
            if (NULL == table->tab[table->def])
                return -ENOMEM;

            memset(table->tab[table->def], 0,
                   sizeof(struct gred_sched_data));
        }

        q = table->tab[table->def];
        q->DP = table->def;
        q->Wlog = ctl->Wlog;
        q->Plog = ctl->Plog;
        q->limit = ctl->limit;
        q->Scell_log = ctl->Scell_log;
        q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
        q->Scell_max = (255 << q->Scell_log);
        q->qth_min = ctl->qth_min << ctl->Wlog;
        q->qth_max = ctl->qth_max << ctl->Wlog;

        if (table->grio)
            q->prio = table->tab[ctl->DP]->prio;
        else
            q->prio = 8;

        q->qcount = -1;
        PSCHED_SET_PASTPERFECT(q->qidlestart);
        memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
    }

    return 0;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
    struct gred_sched *table = qdisc_priv(sch);
    struct tc_gred_sopt *sopt;
    struct rtattr *tb[TCA_GRED_STAB];
    struct rtattr *tb2[TCA_GRED_DPS];

    if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
        return -EINVAL;

    if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
        rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);

        if (tb2[TCA_GRED_DPS-1] == 0)
            return -EINVAL;

        sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;
        table->grio = sopt->grio;
        table->initd = 0;
        return 0;
    }

    DPRINTK("\n GRED_INIT error!\n");
    return -EINVAL;
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
    unsigned long qave;
    struct rtattr *rta;
    struct tc_gred_qopt *opt = NULL;
    struct tc_gred_qopt *dst;
    struct gred_sched *table = qdisc_priv(sch);
    struct gred_sched_data *q;
    int i;
    unsigned char *b = skb->tail;

    rta = (struct rtattr *)b;
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

    opt = kmalloc(sizeof(struct tc_gred_qopt) * MAX_DPs, GFP_KERNEL);
    if (opt == NULL) {
        DPRINTK("gred_dump: failed to malloc for %Zd\n",
                sizeof(struct tc_gred_qopt) * MAX_DPs);
        goto rtattr_failure;
    }

    memset(opt, 0, sizeof(struct tc_gred_qopt) * table->DPs);

    if (!table->initd) {
        DPRINTK("NO GRED Queues setup!\n");
    }
    for (i = 0; i < MAX_DPs; i++) {
        dst = &opt[i];
        q = table->tab[i];

        if (!q) {
            /* hack -- fix at some point with proper message
               This is how we indicate to tc that there is no VQ
               at this DP */
            dst->DP = MAX_DPs + i;
            continue;
        }

        dst->limit = q->limit;
        dst->qth_min = q->qth_min >> q->Wlog;
        dst->qth_max = q->qth_max >> q->Wlog;
        dst->DP = q->DP;
        dst->backlog = q->backlog;

        if (q->qave) {
            if (gred_wred_mode(table)) {
                q->qidlestart = table->tab[table->def]->qidlestart;
                q->qave = table->tab[table->def]->qave;
            }
            if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
                long idle;
                psched_time_t now;

                PSCHED_GET_TIME(now);
                idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
                qave = q->qave >> q->Stab[(idle >> q->Scell_log) & 0xFF];
                dst->qave = qave >> q->Wlog;
            } else {
                dst->qave = q->qave >> q->Wlog;
            }
        } else {
            dst->qave = 0;
        }

        dst->Wlog = q->Wlog;
        dst->Plog = q->Plog;
        dst->Scell_log = q->Scell_log;
        dst->other = q->other;
        dst->forced = q->forced;
        dst->early = q->early;
        dst->pdrop = q->pdrop;
        dst->prio = q->prio;
        dst->packets = q->packetsin;
        dst->bytesin = q->bytesin;
    }
    RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt) * MAX_DPs, opt);
    rta->rta_len = skb->tail - b;

    kfree(opt);
    return skb->len;

rtattr_failure:
    /* also free the opt struct here */
    if (opt)
        kfree(opt);
    DPRINTK("gred_dump: FAILURE!!!!\n");
    skb_trim(skb, b - skb->data);
    return -1;
}
static void gred_destroy(struct Qdisc *sch)
{
    struct gred_sched *table = qdisc_priv(sch);
    int i;

    for (i = 0; i < table->DPs; i++) {
        if (table->tab[i])
            kfree(table->tab[i]);
    }
}
static struct Qdisc_ops gred_qdisc_ops = {
    .id         = "gred",
    .priv_size  = sizeof(struct gred_sched),
    .enqueue    = gred_enqueue,
    .dequeue    = gred_dequeue,
    .requeue    = gred_requeue,
    .drop       = gred_drop,
    .init       = gred_init,
    .reset      = gred_reset,
    .destroy    = gred_destroy,
    .change     = gred_change,
    .dump       = gred_dump,
    .owner      = THIS_MODULE,
};
static int __init gred_module_init(void)
{
    return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
    unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");