/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

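/*
 * Per-class state: each DRR class owns a child qdisc plus the quantum
 * (bytes credited per round) and deficit (bytes it may still send in
 * the current round) that drive the deficit round robin algorithm.
 */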
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

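/*
 * Per-qdisc state: the list of active (backlogged) classes, the filter
 * chain used for classification, and a hash of all classes by classid.
 */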
struct drr_sched {
	struct list_head		active;
	struct tcf_proto		*filter_list;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

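/*
 * Create a new class or update an existing one.  The quantum controls
 * how many bytes a class may send per round; if TCA_DRR_QUANTUM is not
 * supplied it defaults to the device MTU, which guarantees that every
 * backlogged class can send at least one maximum-sized packet per round.
 */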
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt	   = 1;
	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

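/*
 * Class reference counting: tc_ctl_tclass takes a reference through
 * cops->get() and drops it through cops->put(); the class is only
 * destroyed once the last reference is gone.
 */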
static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (cl->qdisc->q.qlen) {
		xstats.deficit = cl->deficit;
		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
	}

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

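/*
 * Map an skb to a class: first try skb->priority as a direct classid,
 * then fall back to the attached tc filters.
 */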
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

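/*
 * Enqueue to the chosen class's child qdisc.  When a class becomes
 * backlogged it is appended to the active list and its deficit is
 * initialized to one quantum.
 */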
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;
	int err;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = qdisc_pkt_len(skb);
	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;

	sch->q.qlen++;
	return err;
}

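/*
 * Deficit round robin dequeue: peek at the head packet of the first
 * active class.  If it fits within the class's remaining deficit,
 * charge its length and hand it out; otherwise credit the class one
 * quantum and rotate it to the tail of the active list.  Since the
 * deficit is never negative, a rotated class can always send on its
 * next visit when quantum >= max packet size.
 */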
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL)
			goto out;

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

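/*
 * Drop one packet from the first active class whose child qdisc
 * supports dropping, returning the freed length in bytes.
 */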
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

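/* Hook tables wiring DRR into the tc class and qdisc APIs. */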
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");