/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
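
/*
 * Overview: each class carries a configured byte quantum and a running
 * deficit. Classes with queued packets sit on the scheduler's ->active
 * list and are visited round robin; on each visit a class may send as
 * many bytes as its deficit allows (see drr_dequeue() below).
 */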
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto		*filter_list;
	struct Qdisc_class_hash		clhash;
};
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
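
/*
 * drr_change_class() creates or reconfigures a class. TCA_DRR_QUANTUM sets
 * the per-round byte allowance; when it is omitted for a new class, the
 * quantum defaults to the device MTU via psched_mtu().
 */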
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt	   = 1;
	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}
static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}
static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	/* Filters attach to the qdisc itself, not to individual classes. */
	if (cl)
		return NULL;

	return &q->filter_list;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}
static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}
static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (cl->qdisc->q.qlen) {
		xstats.deficit = cl->deficit;
		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
	}

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
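
/*
 * Enqueue path: classify the packet, hand it to the class's child qdisc,
 * and when the class becomes backlogged (qlen goes 0 -> 1) append it to
 * the active list with a fresh deficit of one quantum.
 */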
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;
	int err;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = qdisc_pkt_len(skb);
	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;

	sch->q.qlen++;
	return err;
}
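
/*
 * DRR dequeue: peek at the head packet of the first active class. If it
 * fits within the class's deficit, charge the deficit and dequeue it;
 * otherwise grant the class another quantum and rotate it to the tail of
 * the active list so the next class gets a turn.
 */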
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL)
			goto out;

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
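
/*
 * ->drop callback: walk the active classes and drop one packet from the
 * first class whose child qdisc implements ->drop, keeping queue lengths
 * and the active list consistent.
 */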
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");