net/sched/sch_multiq.c
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
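
/*
 * Overview (added note): multiq keeps one band per hardware transmit queue
 * of a multiqueue device.  Packets land in the band given by their queue
 * mapping, and bands are serviced round-robin on dequeue.
 */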

struct multiq_sched_data {
	u16 bands;			/* number of active bands */
	u16 max_bands;			/* TX queue count at init time */
	u16 curband;			/* round-robin dequeue position */
	struct tcf_proto *filter_list;	/* attached classifier chain */
	struct Qdisc **queues;		/* one child qdisc per band */
};

static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
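
/*
 * Note (added): the classifier result is only consulted for actions such as
 * TC_ACT_SHOT or TC_ACT_STOLEN; the band itself comes from the skb's queue
 * mapping.  For example, with four active bands, queue_mapping values 0..3
 * select bands 0..3 and anything larger falls back to band 0.
 */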

static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}

static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}
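
/*
 * Note (added): a single dequeue call probes at most q->bands bands,
 * starting with the one after curband and skipping any band whose hardware
 * subqueue is stopped.  With three bands and curband == 1, the call tries
 * bands 2, 0, 1 in that order and returns the first skb it finds.
 */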

static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;
}

static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}

static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
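
/*
 * Note (added): multiq_tune() ignores any user-supplied band count and pins
 * q->bands to the device's current real_num_tx_queues.  For example, on a
 * hypothetical NIC exposing 8 TX queues under root handle 1:, bands 1:1
 * through 1:8 each get a default pfifo child, and any bands beyond that are
 * replaced with &noop_qdisc and destroyed.
 */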

static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}
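
/*
 * Note (added): multiq_init() sizes queues[] to num_tx_queues (all TX
 * queues allocated for the device), while multiq_tune() only activates
 * real_num_tx_queues bands; unused slots keep pointing at &noop_qdisc.
 */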

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}

static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.get		= multiq_get,
	.put		= multiq_put,
	.walk		= multiq_walk,
	.tcf_chain	= multiq_find_tcf,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_put,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.drop		= multiq_drop,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
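
/*
 * Usage sketch (added note; "eth0" is a hypothetical device): once the
 * qdisc is registered, it can be attached as the root qdisc of a multiqueue
 * device and traffic can be steered into a band with the skbedit action,
 * along the lines of:
 *
 *	tc qdisc add dev eth0 root handle 1: multiq
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dport 80 0xffff action skbedit queue_mapping 2
 */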