/*
 * NOTE(review): the three header lines here were unrelated residue from a
 * git web-view scrape (a stray patch subject, repository path, and blob
 * hash f849243eb095f74943612630c7e658f2601c20ad). They were not part of
 * the source file and are preserved only as provenance in this comment.
 */
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
24 struct prio_sched_data
26 int bands;
27 struct tcf_proto *filter_list;
28 u8 prio2band[TC_PRIO_MAX+1];
29 struct Qdisc *queues[TCQ_PRIO_BANDS];
33 static struct Qdisc *
34 prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
36 struct prio_sched_data *q = qdisc_priv(sch);
37 u32 band = skb->priority;
38 struct tcf_result res;
39 int err;
41 *qerr = NET_XMIT_BYPASS;
42 if (TC_H_MAJ(skb->priority) != sch->handle) {
43 err = tc_classify(skb, q->filter_list, &res);
44 #ifdef CONFIG_NET_CLS_ACT
45 switch (err) {
46 case TC_ACT_STOLEN:
47 case TC_ACT_QUEUED:
48 *qerr = NET_XMIT_SUCCESS;
49 case TC_ACT_SHOT:
50 return NULL;
52 #endif
53 if (!q->filter_list || err < 0) {
54 if (TC_H_MAJ(band))
55 band = 0;
56 return q->queues[q->prio2band[band&TC_PRIO_MAX]];
58 band = res.classid;
60 band = TC_H_MIN(band) - 1;
61 if (band >= q->bands)
62 return q->queues[q->prio2band[0]];
64 return q->queues[band];
67 static int
68 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
70 struct Qdisc *qdisc;
71 int ret;
73 qdisc = prio_classify(skb, sch, &ret);
74 #ifdef CONFIG_NET_CLS_ACT
75 if (qdisc == NULL) {
77 if (ret == NET_XMIT_BYPASS)
78 sch->qstats.drops++;
79 kfree_skb(skb);
80 return ret;
82 #endif
84 ret = qdisc_enqueue(skb, qdisc);
85 if (ret == NET_XMIT_SUCCESS) {
86 sch->bstats.bytes += qdisc_pkt_len(skb);
87 sch->bstats.packets++;
88 sch->q.qlen++;
89 return NET_XMIT_SUCCESS;
91 sch->qstats.drops++;
92 return ret;
96 static int
97 prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
99 struct Qdisc *qdisc;
100 int ret;
102 qdisc = prio_classify(skb, sch, &ret);
103 #ifdef CONFIG_NET_CLS_ACT
104 if (qdisc == NULL) {
105 if (ret == NET_XMIT_BYPASS)
106 sch->qstats.drops++;
107 kfree_skb(skb);
108 return ret;
110 #endif
112 if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
113 sch->q.qlen++;
114 sch->qstats.requeues++;
115 return 0;
117 sch->qstats.drops++;
118 return NET_XMIT_DROP;
122 static struct sk_buff *prio_dequeue(struct Qdisc* sch)
124 struct prio_sched_data *q = qdisc_priv(sch);
125 int prio;
127 for (prio = 0; prio < q->bands; prio++) {
128 struct Qdisc *qdisc = q->queues[prio];
129 struct sk_buff *skb = qdisc->dequeue(qdisc);
130 if (skb) {
131 sch->q.qlen--;
132 return skb;
135 return NULL;
139 static unsigned int prio_drop(struct Qdisc* sch)
141 struct prio_sched_data *q = qdisc_priv(sch);
142 int prio;
143 unsigned int len;
144 struct Qdisc *qdisc;
146 for (prio = q->bands-1; prio >= 0; prio--) {
147 qdisc = q->queues[prio];
148 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
149 sch->q.qlen--;
150 return len;
153 return 0;
157 static void
158 prio_reset(struct Qdisc* sch)
160 int prio;
161 struct prio_sched_data *q = qdisc_priv(sch);
163 for (prio=0; prio<q->bands; prio++)
164 qdisc_reset(q->queues[prio]);
165 sch->q.qlen = 0;
168 static void
169 prio_destroy(struct Qdisc* sch)
171 int prio;
172 struct prio_sched_data *q = qdisc_priv(sch);
174 tcf_destroy_chain(&q->filter_list);
175 for (prio=0; prio<q->bands; prio++)
176 qdisc_destroy(q->queues[prio]);
179 static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
181 struct prio_sched_data *q = qdisc_priv(sch);
182 struct tc_prio_qopt *qopt;
183 int i;
185 if (nla_len(opt) < sizeof(*qopt))
186 return -EINVAL;
187 qopt = nla_data(opt);
189 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
190 return -EINVAL;
192 for (i=0; i<=TC_PRIO_MAX; i++) {
193 if (qopt->priomap[i] >= qopt->bands)
194 return -EINVAL;
197 sch_tree_lock(sch);
198 q->bands = qopt->bands;
199 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
201 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
202 struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
203 if (child != &noop_qdisc) {
204 qdisc_tree_decrease_qlen(child, child->q.qlen);
205 qdisc_destroy(child);
208 sch_tree_unlock(sch);
210 for (i=0; i<q->bands; i++) {
211 if (q->queues[i] == &noop_qdisc) {
212 struct Qdisc *child;
213 child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
214 &pfifo_qdisc_ops,
215 TC_H_MAKE(sch->handle, i + 1));
216 if (child) {
217 sch_tree_lock(sch);
218 child = xchg(&q->queues[i], child);
220 if (child != &noop_qdisc) {
221 qdisc_tree_decrease_qlen(child,
222 child->q.qlen);
223 qdisc_destroy(child);
225 sch_tree_unlock(sch);
229 return 0;
232 static int prio_init(struct Qdisc *sch, struct nlattr *opt)
234 struct prio_sched_data *q = qdisc_priv(sch);
235 int i;
237 for (i=0; i<TCQ_PRIO_BANDS; i++)
238 q->queues[i] = &noop_qdisc;
240 if (opt == NULL) {
241 return -EINVAL;
242 } else {
243 int err;
245 if ((err= prio_tune(sch, opt)) != 0)
246 return err;
248 return 0;
251 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
253 struct prio_sched_data *q = qdisc_priv(sch);
254 unsigned char *b = skb_tail_pointer(skb);
255 struct nlattr *nest;
256 struct tc_prio_qopt opt;
258 opt.bands = q->bands;
259 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
261 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
262 if (nest == NULL)
263 goto nla_put_failure;
264 nla_nest_compat_end(skb, nest);
266 return skb->len;
268 nla_put_failure:
269 nlmsg_trim(skb, b);
270 return -1;
273 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
274 struct Qdisc **old)
276 struct prio_sched_data *q = qdisc_priv(sch);
277 unsigned long band = arg - 1;
279 if (band >= q->bands)
280 return -EINVAL;
282 if (new == NULL)
283 new = &noop_qdisc;
285 sch_tree_lock(sch);
286 *old = q->queues[band];
287 q->queues[band] = new;
288 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
289 qdisc_reset(*old);
290 sch_tree_unlock(sch);
292 return 0;
295 static struct Qdisc *
296 prio_leaf(struct Qdisc *sch, unsigned long arg)
298 struct prio_sched_data *q = qdisc_priv(sch);
299 unsigned long band = arg - 1;
301 if (band >= q->bands)
302 return NULL;
304 return q->queues[band];
307 static unsigned long prio_get(struct Qdisc *sch, u32 classid)
309 struct prio_sched_data *q = qdisc_priv(sch);
310 unsigned long band = TC_H_MIN(classid);
312 if (band - 1 >= q->bands)
313 return 0;
314 return band;
317 static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
319 return prio_get(sch, classid);
/* prio_put - release a class handle; a no-op since bands are static. */
static void prio_put(struct Qdisc *q, unsigned long cl)
{
	return;
}
328 static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg)
330 unsigned long cl = *arg;
331 struct prio_sched_data *q = qdisc_priv(sch);
333 if (cl - 1 > q->bands)
334 return -ENOENT;
335 return 0;
338 static int prio_delete(struct Qdisc *sch, unsigned long cl)
340 struct prio_sched_data *q = qdisc_priv(sch);
341 if (cl - 1 > q->bands)
342 return -ENOENT;
343 return 0;
347 static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
348 struct tcmsg *tcm)
350 struct prio_sched_data *q = qdisc_priv(sch);
352 if (cl - 1 > q->bands)
353 return -ENOENT;
354 tcm->tcm_handle |= TC_H_MIN(cl);
355 if (q->queues[cl-1])
356 tcm->tcm_info = q->queues[cl-1]->handle;
357 return 0;
360 static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
361 struct gnet_dump *d)
363 struct prio_sched_data *q = qdisc_priv(sch);
364 struct Qdisc *cl_q;
366 cl_q = q->queues[cl - 1];
367 if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
368 gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
369 return -1;
371 return 0;
374 static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
376 struct prio_sched_data *q = qdisc_priv(sch);
377 int prio;
379 if (arg->stop)
380 return;
382 for (prio = 0; prio < q->bands; prio++) {
383 if (arg->count < arg->skip) {
384 arg->count++;
385 continue;
387 if (arg->fn(sch, prio+1, arg) < 0) {
388 arg->stop = 1;
389 break;
391 arg->count++;
395 static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
397 struct prio_sched_data *q = qdisc_priv(sch);
399 if (cl)
400 return NULL;
401 return &q->filter_list;
404 static const struct Qdisc_class_ops prio_class_ops = {
405 .graft = prio_graft,
406 .leaf = prio_leaf,
407 .get = prio_get,
408 .put = prio_put,
409 .change = prio_change,
410 .delete = prio_delete,
411 .walk = prio_walk,
412 .tcf_chain = prio_find_tcf,
413 .bind_tcf = prio_bind,
414 .unbind_tcf = prio_put,
415 .dump = prio_dump_class,
416 .dump_stats = prio_dump_class_stats,
419 static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
420 .next = NULL,
421 .cl_ops = &prio_class_ops,
422 .id = "prio",
423 .priv_size = sizeof(struct prio_sched_data),
424 .enqueue = prio_enqueue,
425 .dequeue = prio_dequeue,
426 .requeue = prio_requeue,
427 .drop = prio_drop,
428 .init = prio_init,
429 .reset = prio_reset,
430 .destroy = prio_destroy,
431 .change = prio_tune,
432 .dump = prio_dump,
433 .owner = THIS_MODULE,
436 static int __init prio_module_init(void)
438 return register_qdisc(&prio_qdisc_ops);
441 static void __exit prio_module_exit(void)
443 unregister_qdisc(&prio_qdisc_ops);
446 module_init(prio_module_init)
447 module_exit(prio_module_exit)
449 MODULE_LICENSE("GPL");