pkt_sched: Add and use qdisc_root() and qdisc_root_lock().
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box which is
   able to enqueue packets and to dequeue them (when the device is
   ready to send something) in an order and at times determined by the
   algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a form
   more intelligible to the kernel, to make some sanity checks and do
   the part of the work common to all qdiscs, and to provide rtnetlink
   notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty, only that the discipline
   does not want to send anything at this time.  The queue is really
   empty only if q->q.qlen == 0.  For complicated disciplines with
   multiple queues, q->q is not the real packet queue, but q->q.qlen
   must nevertheless be kept valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore
   NET_XMIT_POLICED	- dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a once-dequeued packet. It is used by non-standard or
   just buggy devices which can defer output even when
   netif_queue_stopped() == 0.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.

   (An illustrative enqueue/dequeue pair is sketched just below.)
 */
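/*
 * Editor's sketch, not part of the original file: the smallest possible
 * enqueue/dequeue pair obeying the contract above, guarded out with #if 0.
 * All example_* names are hypothetical; compare the real minimal qdiscs
 * in sch_fifo.c.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (sch->q.qlen < qdisc_dev(sch)->tx_queue_len) {
		/* Accept: queue on the built-in list and account the packet. */
		__skb_queue_tail(&sch->q, skb);
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}
	/* Queue full: drop this packet and say so, as described above. */
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* May legally return NULL even when q.qlen > 0, e.g. while
	 * throttled; here the built-in queue is the real queue. */
	return __skb_dequeue(&sch->q);
}
#endif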
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->requeue == NULL)
		qops->requeue = noop_qdisc_ops.requeue;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
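/*
 * Editor's sketch, not part of the original file: how a qdisc module pairs
 * the two calls above.  "example_qdisc_ops" is hypothetical; sch_tbf.c and
 * sch_prio.c are real instances of this pattern.
 */
#if 0
static int __init example_module_init(void)
{
	/* Fails with -EEXIST if another qdisc already registered this id. */
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
#endif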
/* We know handle. Find qdisc among all qdisc's attached to device
   (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
{
	struct Qdisc *q;

	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct Qdisc *q = __qdisc_lookup(txq, handle);
		if (q)
			return q;
	}
	return NULL;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */
static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
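/*
 * Editor's sketch, not part of the original file: the usual get/put pairing.
 * A qdisc resolves its rate table once in ->init()/->change() and releases
 * it in ->destroy().  "struct example_sched_data" and "TCA_EXAMPLE_RTAB"
 * are hypothetical; sch_tbf.c is a real user of this interface.
 */
#if 0
struct example_sched_data {
	struct qdisc_rate_table	*rtab;
};

static int example_set_rate(struct example_sched_data *q,
			    struct tc_ratespec *rate,
			    struct nlattr *rtab_attr)
{
	struct qdisc_rate_table *rtab = qdisc_get_rtab(rate, rtab_attr);

	if (rtab == NULL)
		return -EINVAL;		/* missing or malformed table */
	qdisc_put_rtab(q->rtab);	/* release old table; NULL is safe */
	q->rtab = rtab;			/* shared across qdiscs by refcount */
	return 0;
}
#endif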
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);
	struct netdev_queue *txq = wd->qdisc->dev_queue;

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	netif_schedule_queue(txq);

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
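/*
 * Editor's sketch, not part of the original file: the usual watchdog
 * pattern in a rate-limiting qdisc's ->dequeue().  "example_sched_data"
 * and "next_send_time" are hypothetical; sch_tbf.c and sch_cbq.c use the
 * same shape.
 */
#if 0
struct example_sched_data {
	struct qdisc_watchdog	watchdog;	/* qdisc_watchdog_init()ed */
	psched_time_t		next_send_time;
};

static struct sk_buff *example_rate_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	psched_time_t now = psched_get_time();

	if (now < q->next_send_time) {
		/* Nothing may be sent yet: set TCQ_F_THROTTLED and arm
		 * the hrtimer; qdisc_watchdog() above will clear the flag
		 * and reschedule the queue when the timer fires. */
		qdisc_watchdog_schedule(&q->watchdog, q->next_send_time);
		return NULL;
	}
	return __skb_dequeue(&sch->q);
}
#endif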
struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
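/*
 * Editor's sketch, not part of the original file: how a classful qdisc
 * would use the helpers above together with qdisc_class_find() from
 * sch_generic.h.  "example_class" and "example_sched_data" are
 * hypothetical; see sch_htb.c for a real conversion to this scheme.
 */
#if 0
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
};

struct example_sched_data {
	struct Qdisc_class_hash	clhash;		/* qdisc_class_hash_init()ed */
};

static struct example_class *example_find(struct Qdisc *sch, u32 classid)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct example_class, common);
}

/* On class creation the qdisc would do:
 *	cl->common.classid = classid;
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);
 */
#endif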
/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
/* Attach toplevel qdisc to device dev */

static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
	struct netdev_queue *dev_queue;
	struct Qdisc *oqdisc;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	qdisc_lock_tree(dev);
	if (qdisc && qdisc->flags & TCQ_F_INGRESS) {
		dev_queue = &dev->rx_queue;
		oqdisc = dev_queue->qdisc;
		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
			/* delete */
			qdisc_reset(oqdisc);
			dev_queue->qdisc = NULL;
		} else {  /* new */
			dev_queue->qdisc = qdisc;
		}

	} else {
		dev_queue = netdev_get_tx_queue(dev, 0);
		oqdisc = dev_queue->qdisc_sleeping;

		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
			qdisc_reset(oqdisc);

		/* ... and graft new one */
		if (qdisc == NULL)
			qdisc = &noop_qdisc;
		dev_queue->qdisc_sleeping = qdisc;
		dev_queue->qdisc = &noop_qdisc;
	}

	qdisc_unlock_tree(dev);

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return oqdisc;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
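/*
 * Editor's sketch, not part of the original file: a qdisc that drops
 * packets behind its parent's back (e.g. while shrinking a limit in
 * ->change()) must report how many, so every ancestor's q.qlen stays
 * accurate.  "example_trim_queue" is hypothetical.
 */
#if 0
static void example_trim_queue(struct Qdisc *sch, unsigned int limit)
{
	unsigned int dropped = 0;

	/* __skb_dequeue() keeps sch->q.qlen accurate for this qdisc... */
	while (sch->q.qlen > limit) {
		kfree_skb(__skb_dequeue(&sch->q));
		sch->qstats.drops++;
		dropped++;
	}
	/* ...but the ancestors still count the dropped packets, so walk
	 * up via sch->parent, fixing their qlen and firing qlen_notify. */
	qdisc_tree_decrease_qlen(sch, dropped);
}
#endif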
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   Old qdisc is not destroyed but returned in *old.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       u32 classid,
		       struct Qdisc *new, struct Qdisc **old)
{
	int err = 0;
	struct Qdisc *q = *old;

	if (parent == NULL) {
		if (q && q->flags & TCQ_F_INGRESS) {
			*old = dev_graft_qdisc(dev, q);
		} else {
			*old = dev_graft_qdisc(dev, new);
		}
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EINVAL;

		if (cops) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, old);
				cops->put(parent, cl);
			}
		}
	}
	return err;
}
/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     u32 parent, u32 handle, struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* The replay will call qdisc_lookup_ops()
				 * again, so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_RATE]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						qdisc_root_lock(sch),
						tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here? The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
		qdisc_lock_tree(dev);
		list_add_tail(&sch->list, &dev_queue->qdisc_list);
		qdisc_unlock_tree(dev);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	if (tca[TCA_OPTIONS]) {
		int err;

		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}
	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_lock(sch), tca[TCA_RATE]);
	return 0;
}
struct check_loop_arg
{
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
			return err;
		if (q) {
			qdisc_notify(skb, n, clid, q, NULL);
			qdisc_lock_tree(dev);
			qdisc_destroy(q);
			qdisc_unlock_tree(dev);
		}
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}
/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (net != &init_net)
		return -EINVAL;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and we have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if both CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to say
				 *   that the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the
				 *   existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else
		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	if (1) {
		struct Qdisc *old_q = NULL;
		err = qdisc_graft(dev, p, clid, q, &old_q);
		if (err) {
			if (q) {
				qdisc_lock_tree(dev);
				qdisc_destroy(q);
				qdisc_unlock_tree(dev);
			}
			return err;
		}
		qdisc_notify(skb, n, clid, old_q, q);
		if (old_q) {
			qdisc_lock_tree(dev);
			qdisc_destroy(old_q);
			qdisc_unlock_tree(dev);
		}
	}
	return 0;
}
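/*
 * Editor's note, not part of the original file: the flag combinations
 * tested above map onto the iproute2 commands roughly as follows:
 *
 *	tc qdisc add	 -> NLM_F_CREATE | NLM_F_EXCL
 *	tc qdisc replace -> NLM_F_CREATE | NLM_F_REPLACE
 *	tc qdisc change	 -> no flags
 *
 * which is why "change" on a missing qdisc yields -ENOENT, while "add"
 * on an existing one yields -EEXIST.
 */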
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			u32 clid, struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	struct Qdisc *q;

	if (net != &init_net)
		return 0;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(&init_net, dev) {
		struct netdev_queue *dev_queue;
		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;
		dev_queue = netdev_get_tx_queue(dev, 0);
		list_for_each_entry(q, &dev_queue->qdisc_list, list) {
			if (q_idx < s_q_idx) {
				q_idx++;
				continue;
			}
			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
				goto done;
			q_idx++;
		}
cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;

		/* Now qid is the genuine qdisc handle, consistent with
		   both parent and child.

		   TC_H_MAJ(pid) may still be unspecified; complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args
{
	struct qdisc_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct qdisc_dump_args arg;

	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	dev_queue = netdev_get_tx_queue(dev, 0);
	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
		if (t < s_t || !q->ops->cl_ops ||
		    (tcm->tcm_parent &&
		     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
			t++;
			continue;
		}
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		arg.w.fn = qdisc_class_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1];
		arg.w.count = 0;
		q->ops->cl_ops->walk(q, &arg.w);
		cb->args[1] = arg.w.count;
		if (arg.w.stop)
			break;
		t++;
	}

	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans classifier chain attached
   to this qdisc, (optionally) tests for protocol and asks
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			printk("rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
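/*
 * Editor's sketch, not part of the original file: how a classful qdisc's
 * ->enqueue() typically uses tc_classify() to pick a class.
 * "example_class", "example_sched_data" and "filter_list" are hypothetical;
 * compare the *_classify() helpers in sch_htb.c and sch_prio.c.
 */
#if 0
static struct example_class *example_classify(struct sk_buff *skb,
					      struct Qdisc *sch, int *qerr)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	*qerr = NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		*qerr = NET_XMIT_SUCCESS;	/* consumed by an action */
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;			/* caller drops the packet */
	}
#endif
	if (result < 0)				/* no filter matched */
		return NULL;
	return (struct example_class *)res.class;
}
#endif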
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / (u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);