/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
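/*
 * DRR (Shreedhar/Varghese) serves active classes in round-robin order.
 * Each class has a configurable quantum; when its turn comes, its
 * deficit grows by the quantum, and it may transmit as long as the
 * packet at the head of its child qdisc fits within the remaining
 * deficit.  Provided each quantum is at least one MTU, this gives
 * throughput-fair scheduling in O(1) time per packet.
 */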
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
struct drr_class {
        struct Qdisc_class_common       common;
        unsigned int                    refcnt;
        unsigned int                    filter_cnt;

        struct gnet_stats_basic         bstats;
        struct gnet_stats_queue         qstats;
        struct gnet_stats_rate_est      rate_est;
        struct list_head                alist;
        struct Qdisc                    *qdisc;

        u32                             quantum;
        u32                             deficit;
};

struct drr_sched {
        struct list_head                active;
        struct tcf_proto                *filter_list;
        struct Qdisc_class_hash         clhash;
};
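/*
 * Invariant: a class sits on q->active if and only if its child qdisc
 * has packets queued.  cl->deficit is only meaningful while the class
 * is on the active list; it is reset to cl->quantum whenever the class
 * becomes active again.
 */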
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct drr_class, common);
}
static void drr_purge_queue(struct drr_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
};
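/*
 * Create or reconfigure a class.  If no TCA_DRR_QUANTUM attribute is
 * supplied, the quantum defaults to the device MTU (psched_mtu()), so
 * that a newly active class can always send at least one full-sized
 * packet per round.
 */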
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)*arg;
        struct nlattr *tb[TCA_DRR_MAX + 1];
        u32 quantum;
        int err;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
        if (err < 0)
                return err;

        if (tb[TCA_DRR_QUANTUM]) {
                quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
                if (quantum == 0)
                        return -EINVAL;
        } else
                quantum = psched_mtu(qdisc_dev(sch));

        if (cl != NULL) {
                /* Existing class: update quantum and rate estimator. */
                sch_tree_lock(sch);
                if (tb[TCA_DRR_QUANTUM])
                        cl->quantum = quantum;
                sch_tree_unlock(sch);

                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              qdisc_root_sleeping_lock(sch),
                                              tca[TCA_RATE]);
                return 0;
        }

        /* New class: allocate and attach a default pfifo child qdisc. */
        cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        cl->refcnt         = 1;
        cl->common.classid = classid;
        cl->quantum        = quantum;
        cl->qdisc          = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                               &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;

        if (tca[TCA_RATE])
                gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                      qdisc_root_sleeping_lock(sch),
                                      tca[TCA_RATE]);

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;
}
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_destroy(cl->qdisc);
        kfree(cl);
}
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)arg;

        if (cl->filter_cnt > 0)
                return -EBUSY;

        sch_tree_lock(sch);

        drr_purge_queue(cl);
        qdisc_class_hash_remove(&q->clhash, &cl->common);

        if (--cl->refcnt == 0)
                drr_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}
static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
        struct drr_class *cl = drr_find_class(sch, classid);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}
static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        if (--cl->refcnt == 0)
                drr_destroy_class(sch, cl);
}
static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
        struct drr_sched *q = qdisc_priv(sch);

        /* Filters may only be attached to the qdisc itself, not classes. */
        if (cl)
                return NULL;

        return &q->filter_list;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        struct drr_class *cl = drr_find_class(sch, classid);

        if (cl != NULL)
                cl->filter_cnt++;

        return (unsigned long)cl;
}
static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        cl->filter_cnt--;
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
                           struct Qdisc *new, struct Qdisc **old)
{
        struct drr_class *cl = (struct drr_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        drr_purge_queue(cl);
        *old = cl->qdisc;
        cl->qdisc = new;
        sch_tree_unlock(sch);
        return 0;
}
static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        return cl->qdisc;
}
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        if (cl->qdisc->q.qlen == 0)
                list_del(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct drr_class *cl = (struct drr_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info   = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
{
        struct drr_class *cl = (struct drr_class *)arg;
        struct tc_drr_stats xstats;

        memset(&xstats, 0, sizeof(xstats));
        if (cl->qdisc->q.qlen)
                xstats.deficit = cl->deficit;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct hlist_node *n;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
                cl = drr_find_class(sch, skb->priority);
                if (cl != NULL)
                        return cl;
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (struct drr_class *)res.class;
                if (cl == NULL)
                        cl = drr_find_class(sch, res.classid);
                return cl;
        }
        return NULL;
}
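/*
 * Enqueue into the class's child qdisc.  When the class transitions
 * from empty to backlogged, it is appended to the active list and its
 * deficit is primed with one quantum.
 */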
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int len;
        int err;

        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
        }

        len = qdisc_pkt_len(skb);
        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
                        sch->qstats.drops++;
                }
                return err;
        }

        if (cl->qdisc->q.qlen == 1) {
                list_add_tail(&cl->alist, &q->active);
                cl->deficit = cl->quantum;
        }

        cl->bstats.packets++;
        cl->bstats.bytes += len;
        sch->bstats.packets++;
        sch->bstats.bytes += len;

        sch->q.qlen++;
        return err;
}
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct sk_buff *skb;
        unsigned int len;

        while (!list_empty(&q->active)) {
                cl = list_first_entry(&q->active, struct drr_class, alist);
                skb = cl->qdisc->ops->peek(cl->qdisc);
                if (skb == NULL)
                        goto out;

                len = qdisc_pkt_len(skb);
                if (len <= cl->deficit) {
                        cl->deficit -= len;
                        skb = qdisc_dequeue_peeked(cl->qdisc);
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);
                        sch->q.qlen--;
                        return skb;
                }

                cl->deficit += cl->quantum;
                list_move_tail(&cl->alist, &q->active);
        }
out:
        return NULL;
}
static unsigned int drr_drop(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int len;

        list_for_each_entry(cl, &q->active, alist) {
                if (cl->qdisc->ops->drop) {
                        len = cl->qdisc->ops->drop(cl->qdisc);
                        if (len > 0) {
                                if (cl->qdisc->q.qlen == 0)
                                        list_del(&cl->alist);
                                sch->q.qlen--;
                                return len;
                        }
                }
        }
        return 0;
}
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct drr_sched *q = qdisc_priv(sch);
        int err;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
        INIT_LIST_HEAD(&q->active);
        return 0;
}
static void drr_reset_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct hlist_node *n;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen)
                                list_del(&cl->alist);
                        qdisc_reset(cl->qdisc);
                }
        }
        sch->q.qlen = 0;
}
static void drr_destroy_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct hlist_node *n, *next;
        unsigned int i;

        tcf_destroy_chain(&q->filter_list);

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
                                          common.hnode)
                        drr_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops drr_class_ops = {
        .change         = drr_change_class,
        .delete         = drr_delete_class,
        .get            = drr_get_class,
        .put            = drr_put_class,
        .tcf_chain      = drr_tcf_chain,
        .bind_tcf       = drr_bind_tcf,
        .unbind_tcf     = drr_unbind_tcf,
        .graft          = drr_graft_class,
        .leaf           = drr_class_leaf,
        .qlen_notify    = drr_qlen_notify,
        .dump           = drr_dump_class,
        .dump_stats     = drr_dump_class_stats,
        .walk           = drr_walk,
};
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
        .cl_ops         = &drr_class_ops,
        .id             = "drr",
        .priv_size      = sizeof(struct drr_sched),
        .enqueue        = drr_enqueue,
        .dequeue        = drr_dequeue,
        .peek           = qdisc_peek_dequeued,
        .drop           = drr_drop,
        .init           = drr_init_qdisc,
        .reset          = drr_reset_qdisc,
        .destroy        = drr_destroy_qdisc,
        .owner          = THIS_MODULE,
};
static int __init drr_init(void)
{
        return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
        unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
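/*
 * Illustrative userspace usage (a minimal sketch; the device name and
 * classids below are assumptions, not part of this module):
 *
 *      # attach drr as root qdisc and create two classes
 *      tc qdisc add dev eth0 root handle 1: drr
 *      tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *      tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *
 *      # direct traffic into the classes with u32 filters
 *      tc filter add dev eth0 parent 1: protocol ip u32 \
 *              match ip dport 80 0xffff flowid 1:1
 *      tc filter add dev eth0 parent 1: protocol ip u32 \
 *              match ip dport 22 0xffff flowid 1:2
 *
 * Unclassified packets are dropped by drr_enqueue(), so a catch-all
 * filter or default class is normally wanted.
 */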