/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

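/*
 * Multiqueue (multiq) scheduler: one child qdisc per hardware transmit
 * queue of a multiqueue device, serviced round-robin while skipping
 * bands whose hardware queue is stopped.
 *
 * Illustrative userspace setup (assumes a multiqueue NIC "eth0"):
 *
 *	tc qdisc add dev eth0 root handle 1: multiq
 */
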
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto *filter_list;
	struct Qdisc **queues;
};

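/*
 * Map an skb to one of the band qdiscs.  Attached filters run first
 * (for actions such as stolen/shot); the band itself comes from the
 * skb's queue mapping, chosen by the stack or device at transmit time.
 */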
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif

	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

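/*
 * Enqueue to the band chosen by multiq_classify(), accounting drops
 * against this qdisc so its statistics stay meaningful.
 */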
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}

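/*
 * Round-robin dequeue: advance curband on every attempt so no band can
 * monopolize the link, and skip bands whose hardware subqueue is
 * stopped so a stalled queue cannot block the others.
 */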
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}

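/*
 * Peek follows the same rotation as dequeue but on a local copy of
 * curband, so inspecting the next packet never changes scheduler state.
 */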
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;
}

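/*
 * When asked to shed a packet, walk the bands from last to first and
 * ask each child qdisc to drop until one succeeds.
 */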
static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}

static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

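/*
 * (Re)size the band array to match the device's active transmit queues:
 * bands above the new count are collapsed to noop_qdisc, and bands
 * below it that still point at noop_qdisc get a fresh default child.
 */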
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

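/*
 * Allocate one queue slot per possible transmit queue, point them all
 * at noop_qdisc, then let multiq_tune() attach the real children.
 */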
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

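/*
 * Replace the child qdisc of one band; a NULL replacement installs
 * noop_qdisc, effectively disabling the band.
 */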
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}

static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

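/*
 * Iterate over the bands as classes 1..bands for the class walker,
 * honouring the walker's skip/count/stop protocol.
 */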
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.get		=	multiq_get,
	.put		=	multiq_put,
	.walk		=	multiq_walk,
	.tcf_chain	=	multiq_find_tcf,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_put,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.drop		=	multiq_drop,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");