/*
 * net/sched/sch_api.c  Packet scheduler API.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>

#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   Qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a
   form more intelligible to the kernel, to make some sanity checks
   and the part of the work which is common to all qdiscs, and to
   provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty, only that the
   discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   the real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP    - this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN      - probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED - dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   Requeues a packet that has already been dequeued once. It is used
   for non-standard or just buggy devices which can defer output even
   if dev->tbusy == 0.

   ---reset

   Returns the qdisc to its initial state: purges all buffers, clears
   all timers, counters (except for statistics) etc.

   ---init

   Initializes a newly created qdisc.

   ---destroy

   Destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   Changes qdisc parameters.
 */
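
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * a minimal enqueue/dequeue pair honouring the contract above, modelled
 * loosely on the simple FIFO qdiscs. The function names are hypothetical.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    if (likely(sch->q.qlen < sch->dev->tx_queue_len)) {
        __skb_queue_tail(&sch->q, skb);     /* keeps q.qlen valid */
        sch->bstats.bytes += skb->len;
        sch->bstats.packets++;
        return NET_XMIT_SUCCESS;            /* 0: enqueued successfully */
    }
    sch->qstats.drops++;
    kfree_skb(skb);
    return NET_XMIT_DROP;                   /* this very packet was dropped */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
    /* NULL only means "nothing to send right now"; the queue is
     * truly empty iff sch->q.qlen == 0. */
    return __skb_dequeue(&sch->q);
}
#endif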
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *      Queueing disciplines manipulation.      *
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
    struct Qdisc_ops *q, **qp;
    int rc = -EEXIST;

    write_lock(&qdisc_mod_lock);
    for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
        if (!strcmp(qops->id, q->id))
            goto out;

    if (qops->enqueue == NULL)
        qops->enqueue = noop_qdisc_ops.enqueue;
    if (qops->requeue == NULL)
        qops->requeue = noop_qdisc_ops.requeue;
    if (qops->dequeue == NULL)
        qops->dequeue = noop_qdisc_ops.dequeue;

    qops->next = NULL;
    *qp = qops;
    rc = 0;
out:
    write_unlock(&qdisc_mod_lock);
    return rc;
}
int unregister_qdisc(struct Qdisc_ops *qops)
{
    struct Qdisc_ops *q, **qp;
    int err = -ENOENT;

    write_lock(&qdisc_mod_lock);
    for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
        if (q == qops)
            break;
    if (q) {
        *qp = q->next;
        q->next = NULL;
        err = 0;
    }
    write_unlock(&qdisc_mod_lock);
    return err;
}
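
/*
 * Illustrative sketch (compiled out): how a qdisc module would plug
 * into register_qdisc()/unregister_qdisc() above. The ops struct and
 * the enqueue/dequeue callbacks (from the earlier sketch in this file)
 * are hypothetical.
 */
#if 0
static struct Qdisc_ops example_qdisc_ops = {
    .id      = "example",
    .enqueue = example_enqueue,
    .dequeue = example_dequeue,
    .owner   = THIS_MODULE,
};

static int __init example_module_init(void)
{
    /* returns -EEXIST if another qdisc already claimed the id */
    return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
    unregister_qdisc(&example_qdisc_ops);
}
#endif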
/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (root qdisc, all its children, children of children, etc.).
 */

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
    struct Qdisc *q;

    list_for_each_entry(q, &dev->qdisc_list, list) {
        if (q->handle == handle)
            return q;
    }
    return NULL;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
    unsigned long cl;
    struct Qdisc *leaf;
    struct Qdisc_class_ops *cops = p->ops->cl_ops;

    if (cops == NULL)
        return NULL;
    cl = cops->get(p, classid);

    if (cl == 0)
        return NULL;
    leaf = cops->leaf(p, cl);
    cops->put(p, cl);
    return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
{
    struct Qdisc_ops *q = NULL;

    if (kind) {
        read_lock(&qdisc_mod_lock);
        for (q = qdisc_base; q; q = q->next) {
            if (rtattr_strcmp(kind, q->id) == 0) {
                if (!try_module_get(q->owner))
                    q = NULL;
                break;
            }
        }
        read_unlock(&qdisc_mod_lock);
    }
    return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab)
{
    struct qdisc_rate_table *rtab;

    for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
        if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
            rtab->refcnt++;
            return rtab;
        }
    }

    if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024)
        return NULL;

    rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
    if (rtab) {
        rtab->rate = *r;
        rtab->refcnt = 1;
        memcpy(rtab->data, RTA_DATA(tab), 1024);
        rtab->next = qdisc_rtab_list;
        qdisc_rtab_list = rtab;
    }
    return rtab;
}
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
    struct qdisc_rate_table *rtab, **rtabp;

    if (!tab || --tab->refcnt)
        return;

    for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
        if (rtab == tab) {
            *rtabp = rtab->next;
            kfree(rtab);
            return;
        }
    }
}
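
/*
 * Illustrative sketch (compiled out): the typical get/put pairing for
 * the shared, reference-counted rate tables above, as a rate-limiting
 * qdisc's init/destroy would use it. The function names and parameter
 * plumbing are hypothetical; userspace tc supplies the 1024-byte table
 * carried by the rtattr.
 */
#if 0
static int example_rate_init(struct Qdisc *sch, struct tc_ratespec *r,
                             struct rtattr *rtab_attr)
{
    struct qdisc_rate_table *rtab;

    /* shares an existing table for an identical ratespec,
     * or allocates a new one */
    rtab = qdisc_get_rtab(r, rtab_attr);
    if (rtab == NULL)
        return -EINVAL;
    /* ... stash rtab in the qdisc's private data ... */
    return 0;
}

static void example_rate_destroy(struct qdisc_rate_table *rtab)
{
    qdisc_put_rtab(rtab);   /* freed only when the refcount hits zero */
}
#endif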
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
    struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                             timer);
    struct net_device *dev = wd->qdisc->dev;

    wd->qdisc->flags &= ~TCQ_F_THROTTLED;
    smp_wmb();
    netif_schedule(dev);

    return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
    hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    wd->timer.function = qdisc_watchdog;
    wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
    ktime_t time;

    wd->qdisc->flags |= TCQ_F_THROTTLED;
    time = ktime_set(0, 0);
    time = ktime_add_ns(time, PSCHED_US2NS(expires));
    hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
    hrtimer_cancel(&wd->timer);
    wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
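
/*
 * Illustrative sketch (compiled out): how a rate-limiting qdisc uses
 * the watchdog above when it holds a packet that must not be sent yet.
 * The private struct and the next_send field are hypothetical, and
 * qdisc_watchdog_init() is assumed to have been called from ->init().
 */
#if 0
struct example_sched_data {
    struct qdisc_watchdog watchdog;
    psched_time_t         next_send;    /* hypothetical deadline */
};

static struct sk_buff *example_throttled_dequeue(struct Qdisc *sch)
{
    struct example_sched_data *q = qdisc_priv(sch);
    psched_time_t now;

    PSCHED_GET_TIME(now);
    if (now < q->next_send) {
        /* Sets TCQ_F_THROTTLED and arms the hrtimer; the handler
         * clears the flag and reschedules the device, so dequeue
         * is retried at the right moment. */
        qdisc_watchdog_schedule(&q->watchdog, q->next_send);
        return NULL;
    }
    return __skb_dequeue(&sch->q);
}
#endif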
/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
    int i = 0x10000;
    static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

    do {
        autohandle += TC_H_MAKE(0x10000U, 0);
        if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
            autohandle = TC_H_MAKE(0x80000000U, 0);
    } while (qdisc_lookup(dev, autohandle) && --i > 0);

    return i > 0 ? autohandle : 0;
}
/* Attach toplevel qdisc to device dev */

static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
    struct Qdisc *oqdisc;

    if (dev->flags & IFF_UP)
        dev_deactivate(dev);

    qdisc_lock_tree(dev);
    if (qdisc && qdisc->flags & TCQ_F_INGRESS) {
        oqdisc = dev->qdisc_ingress;
        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
            /* delete */
            qdisc_reset(oqdisc);
            dev->qdisc_ingress = NULL;
        } else {  /* new */
            dev->qdisc_ingress = qdisc;
        }

    } else {

        oqdisc = dev->qdisc_sleeping;

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
            qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
            qdisc = &noop_qdisc;
        dev->qdisc_sleeping = qdisc;
        dev->qdisc = &noop_qdisc;
    }

    qdisc_unlock_tree(dev);

    if (dev->flags & IFF_UP)
        dev_activate(dev);

    return oqdisc;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
    struct Qdisc_class_ops *cops;
    unsigned long cl;
    u32 parentid;

    if (n == 0)
        return;
    while ((parentid = sch->parent)) {
        if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
            return;

        sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
        if (sch == NULL) {
            WARN_ON(parentid != TC_H_ROOT);
            return;
        }
        cops = sch->ops->cl_ops;
        if (cops->qlen_notify) {
            cl = cops->get(sch, parentid);
            cops->qlen_notify(sch, cl);
            cops->put(sch, cl);
        }
        sch->q.qlen -= n;
    }
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   The old qdisc is not destroyed but returned in *old.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       u32 classid,
                       struct Qdisc *new, struct Qdisc **old)
{
    int err = 0;
    struct Qdisc *q = *old;


    if (parent == NULL) {
        if (q && q->flags & TCQ_F_INGRESS) {
            *old = dev_graft_qdisc(dev, q);
        } else {
            *old = dev_graft_qdisc(dev, new);
        }
    } else {
        struct Qdisc_class_ops *cops = parent->ops->cl_ops;

        err = -EINVAL;

        if (cops) {
            unsigned long cl = cops->get(parent, classid);
            if (cl) {
                err = cops->graft(parent, cl, new, old);
                cops->put(parent, cl);
            }
        }
    }
    return err;
}
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, u32 parent, u32 handle,
             struct rtattr **tca, int *errp)
{
    int err;
    struct rtattr *kind = tca[TCA_KIND-1];
    struct Qdisc *sch;
    struct Qdisc_ops *ops;

    ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
    if (ops == NULL && kind != NULL) {
        char name[IFNAMSIZ];
        if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
            /* We dropped the RTNL semaphore in order to
             * perform the module load. So, even if we
             * succeeded in loading the module we have to
             * tell the caller to replay the request. We
             * indicate this using -EAGAIN.
             * We replay the request because the device may
             * go away in the mean time.
             */
            rtnl_unlock();
            request_module("sch_%s", name);
            rtnl_lock();
            ops = qdisc_lookup_ops(kind);
            if (ops != NULL) {
                /* We will try again qdisc_lookup_ops,
                 * so don't keep a reference.
                 */
                module_put(ops->owner);
                err = -EAGAIN;
                goto err_out;
            }
        }
    }
#endif

    err = -ENOENT;
    if (ops == NULL)
        goto err_out;

    sch = qdisc_alloc(dev, ops);
    if (IS_ERR(sch)) {
        err = PTR_ERR(sch);
        goto err_out2;
    }

    sch->parent = parent;

    if (handle == TC_H_INGRESS) {
        sch->flags |= TCQ_F_INGRESS;
        sch->stats_lock = &dev->ingress_lock;
        handle = TC_H_MAKE(TC_H_INGRESS, 0);
    } else {
        sch->stats_lock = &dev->queue_lock;
        if (handle == 0) {
            handle = qdisc_alloc_handle(dev);
            err = -ENOMEM;
            if (handle == 0)
                goto err_out3;
        }
    }

    sch->handle = handle;

    if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1]) {
            err = gen_new_estimator(&sch->bstats, &sch->rate_est,
                                    sch->stats_lock,
                                    tca[TCA_RATE-1]);
            if (err) {
                /*
                 * Any broken qdiscs that would require
                 * a ops->reset() here? The qdisc was never
                 * in action so it shouldn't be necessary.
                 */
                if (ops->destroy)
                    ops->destroy(sch);
                goto err_out3;
            }
        }
#endif
        qdisc_lock_tree(dev);
        list_add_tail(&sch->list, &dev->qdisc_list);
        qdisc_unlock_tree(dev);

        return sch;
    }
err_out3:
    dev_put(dev);
    kfree((char *) sch - sch->padded);
err_out2:
    module_put(ops->owner);
err_out:
    *errp = err;
    return NULL;
}
static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
{
    if (tca[TCA_OPTIONS-1]) {
        int err;

        if (sch->ops->change == NULL)
            return -EINVAL;
        err = sch->ops->change(sch, tca[TCA_OPTIONS-1]);
        if (err)
            return err;
    }
#ifdef CONFIG_NET_ESTIMATOR
    if (tca[TCA_RATE-1])
        gen_replace_estimator(&sch->bstats, &sch->rate_est,
                              sch->stats_lock, tca[TCA_RATE-1]);
#endif
    return 0;
}
struct check_loop_arg
{
    struct qdisc_walker w;
    struct Qdisc        *p;
    int                 depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
    struct check_loop_arg arg;

    if (q->ops->cl_ops == NULL)
        return 0;

    arg.w.stop = arg.w.skip = arg.w.count = 0;
    arg.w.fn = check_loop_fn;
    arg.depth = depth;
    arg.p = p;
    q->ops->cl_ops->walk(q, &arg.w);
    return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
    struct Qdisc *leaf;
    struct Qdisc_class_ops *cops = q->ops->cl_ops;
    struct check_loop_arg *arg = (struct check_loop_arg *)w;

    leaf = cops->leaf(q, cl);
    if (leaf) {
        if (leaf == arg->p || arg->depth > 7)
            return -ELOOP;
        return check_loop(leaf, arg->p, arg->depth + 1);
    }
    return 0;
}
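
/*
 * Illustrative sketch (compiled out): the walk() contract that
 * check_loop() relies on. A classful qdisc's walk callback visits
 * every class, honouring skip/count, and stops as soon as fn returns
 * a negative value. The class type and list layout are hypothetical.
 */
#if 0
struct example_class {
    struct list_head list;
    /* ... */
};

struct example_walk_data {
    struct list_head classes;   /* hypothetical list of classes */
};

static void example_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
    struct example_walk_data *q = qdisc_priv(sch);
    struct example_class *cl;

    if (walker->stop)
        return;
    list_for_each_entry(cl, &q->classes, list) {
        if (walker->count < walker->skip) {
            walker->count++;
            continue;
        }
        if (walker->fn(sch, (unsigned long)cl, walker) < 0) {
            walker->stop = 1;
            return;
        }
        walker->count++;
    }
}
#endif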
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm = NLMSG_DATA(n);
    struct rtattr **tca = arg;
    struct net_device *dev;
    u32 clid = tcm->tcm_parent;
    struct Qdisc *q = NULL;
    struct Qdisc *p = NULL;
    int err;

    if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    if (clid) {
        if (clid != TC_H_ROOT) {
            if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
                    return -ENOENT;
                q = qdisc_leaf(p, clid);
            } else { /* ingress */
                q = dev->qdisc_ingress;
            }
        } else {
            q = dev->qdisc_sleeping;
        }
        if (!q)
            return -ENOENT;

        if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
            return -EINVAL;
    } else {
        if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
            return -ENOENT;
    }

    if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
        return -EINVAL;

    if (n->nlmsg_type == RTM_DELQDISC) {
        if (!clid)
            return -EINVAL;
        if (q->handle == 0)
            return -ENOENT;
        if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
            return err;
        if (q) {
            qdisc_notify(skb, n, clid, q, NULL);
            qdisc_lock_tree(dev);
            qdisc_destroy(q);
            qdisc_unlock_tree(dev);
        }
    } else {
        qdisc_notify(skb, n, clid, NULL, q);
    }
    return 0;
}
/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm;
    struct rtattr **tca;
    struct net_device *dev;
    u32 clid;
    struct Qdisc *q, *p;
    int err;

replay:
    /* Reinit, just in case something touches this. */
    tcm = NLMSG_DATA(n);
    tca = arg;
    clid = tcm->tcm_parent;
    q = p = NULL;

    if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    if (clid) {
        if (clid != TC_H_ROOT) {
            if (clid != TC_H_INGRESS) {
                if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
                    return -ENOENT;
                q = qdisc_leaf(p, clid);
            } else { /* ingress */
                q = dev->qdisc_ingress;
            }
        } else {
            q = dev->qdisc_sleeping;
        }

        /* It may be the default qdisc; ignore it. */
        if (q && q->handle == 0)
            q = NULL;

        if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
            if (tcm->tcm_handle) {
                if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
                    return -EEXIST;
                if (TC_H_MIN(tcm->tcm_handle))
                    return -EINVAL;
                if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
                    goto create_n_graft;
                if (n->nlmsg_flags & NLM_F_EXCL)
                    return -EEXIST;
                if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
                    return -EINVAL;
                if (q == p ||
                    (p && check_loop(q, p, 0)))
                    return -ELOOP;
                atomic_inc(&q->refcnt);
                goto graft;
            } else {
                if (q == NULL)
                    goto create_n_graft;

                /* This magic test requires explanation.
                 *
                 *   We know that some child q is already
                 *   attached to this parent and we have a choice:
                 *   either to change it or to create/graft a new one.
                 *
                 *   1. We are allowed to create/graft only
                 *   if both the CREATE and REPLACE flags are set.
                 *
                 *   2. If EXCL is set, the requestor meant that
                 *   the qdisc tcm_handle is not expected
                 *   to exist, so we choose create/graft too.
                 *
                 *   3. The last case is when no flags are set.
                 *   Alas, it is a sort of hole in the API; we
                 *   cannot decide what to do unambiguously.
                 *   For now we select create/graft if the
                 *   user gave a KIND which does not match the
                 *   existing one.
                 */
                if ((n->nlmsg_flags & NLM_F_CREATE) &&
                    (n->nlmsg_flags & NLM_F_REPLACE) &&
                    ((n->nlmsg_flags & NLM_F_EXCL) ||
                     (tca[TCA_KIND-1] &&
                      rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))))
                    goto create_n_graft;
            }
        }
    } else {
        if (!tcm->tcm_handle)
            return -EINVAL;
        q = qdisc_lookup(dev, tcm->tcm_handle);
    }

    /* Change qdisc parameters */
    if (q == NULL)
        return -ENOENT;
    if (n->nlmsg_flags & NLM_F_EXCL)
        return -EEXIST;
    if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
        return -EINVAL;
    err = qdisc_change(q, tca);
    if (err == 0)
        qdisc_notify(skb, n, clid, NULL, q);
    return err;

create_n_graft:
    if (!(n->nlmsg_flags & NLM_F_CREATE))
        return -ENOENT;
    if (clid == TC_H_INGRESS)
        q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent,
                         tca, &err);
    else
        q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle,
                         tca, &err);
    if (q == NULL) {
        if (err == -EAGAIN)
            goto replay;
        return err;
    }

graft:
    if (1) {
        struct Qdisc *old_q = NULL;
        err = qdisc_graft(dev, p, clid, q, &old_q);
        if (err) {
            if (q) {
                qdisc_lock_tree(dev);
                qdisc_destroy(q);
                qdisc_unlock_tree(dev);
            }
            return err;
        }
        qdisc_notify(skb, n, clid, old_q, q);
        if (old_q) {
            qdisc_lock_tree(dev);
            qdisc_destroy(old_q);
            qdisc_unlock_tree(dev);
        }
    }
    return 0;
}
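
/*
 * Illustrative sketch (compiled out): the nlmsg_flags combinations the
 * function above dispatches on, as userspace "tc" sets them when
 * building an RTM_NEWQDISC request. The request layout is simplified.
 */
#if 0
struct {
    struct nlmsghdr n;
    struct tcmsg    t;
} req = {
    .n.nlmsg_len   = NLMSG_LENGTH(sizeof(struct tcmsg)),
    .n.nlmsg_type  = RTM_NEWQDISC,
    /* "tc qdisc add":     NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL
     * "tc qdisc replace": NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE
     * "tc qdisc change":  NLM_F_REQUEST (none of the above)
     */
    .n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL,
    .t.tcm_family  = AF_UNSPEC,
};
#endif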
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 pid, u32 seq, u16 flags, int event)
{
    struct tcmsg *tcm;
    struct nlmsghdr *nlh;
    unsigned char *b = skb_tail_pointer(skb);
    struct gnet_dump d;

    nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
    tcm = NLMSG_DATA(nlh);
    tcm->tcm_family = AF_UNSPEC;
    tcm->tcm__pad1 = 0;
    tcm->tcm__pad2 = 0;
    tcm->tcm_ifindex = q->dev->ifindex;
    tcm->tcm_parent = clid;
    tcm->tcm_handle = q->handle;
    tcm->tcm_info = atomic_read(&q->refcnt);
    RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
    if (q->ops->dump && q->ops->dump(q, skb) < 0)
        goto rtattr_failure;
    q->qstats.qlen = q->q.qlen;

    if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
                                     TCA_XSTATS, q->stats_lock, &d) < 0)
        goto rtattr_failure;

    if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
        goto rtattr_failure;

    if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
        gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
#endif
        gnet_stats_copy_queue(&d, &q->qstats) < 0)
        goto rtattr_failure;

    if (gnet_stats_finish_copy(&d) < 0)
        goto rtattr_failure;

    nlh->nlmsg_len = skb_tail_pointer(skb) - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    nlmsg_trim(skb, b);
    return -1;
}
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                        u32 clid, struct Qdisc *old, struct Qdisc *new)
{
    struct sk_buff *skb;
    u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (old && old->handle) {
        if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
            goto err_out;
    }
    if (new) {
        if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
            goto err_out;
    }

    if (skb->len)
        return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
    kfree_skb(skb);
    return -EINVAL;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
    int idx, q_idx;
    int s_idx, s_q_idx;
    struct net_device *dev;
    struct Qdisc *q;

    s_idx = cb->args[0];
    s_q_idx = q_idx = cb->args[1];
    read_lock(&dev_base_lock);
    idx = 0;
    for_each_netdev(dev) {
        if (idx < s_idx)
            goto cont;
        if (idx > s_idx)
            s_q_idx = 0;
        q_idx = 0;
        list_for_each_entry(q, &dev->qdisc_list, list) {
            if (q_idx < s_q_idx) {
                q_idx++;
                continue;
            }
            if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
                              cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                goto done;
            q_idx++;
        }
cont:
        idx++;
    }

done:
    read_unlock(&dev_base_lock);

    cb->args[0] = idx;
    cb->args[1] = q_idx;

    return skb->len;
}
/************************************************
 *      Traffic classes manipulation.           *
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm = NLMSG_DATA(n);
    struct rtattr **tca = arg;
    struct net_device *dev;
    struct Qdisc *q = NULL;
    struct Qdisc_class_ops *cops;
    unsigned long cl = 0;
    unsigned long new_cl;
    u32 pid = tcm->tcm_parent;
    u32 clid = tcm->tcm_handle;
    u32 qid = TC_H_MAJ(clid);
    int err;

    if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    /*
       parent == TC_H_UNSPEC - unspecified parent.
       parent == TC_H_ROOT   - class is root, which has no parent.
       parent == X:0         - parent is root class.
       parent == X:Y         - parent is a node in hierarchy.
       parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

       handle == 0:0         - generate handle from kernel pool.
       handle == 0:Y         - class is X:Y, where X:0 is qdisc.
       handle == X:Y         - fully specified.
       handle == X:0         - root class.
     */

    /* Step 1. Determine qdisc handle X:0 */

    if (pid != TC_H_ROOT) {
        u32 qid1 = TC_H_MAJ(pid);

        if (qid && qid1) {
            /* If both majors are known, they must be identical. */
            if (qid != qid1)
                return -EINVAL;
        } else if (qid1) {
            qid = qid1;
        } else if (qid == 0)
            qid = dev->qdisc_sleeping->handle;

        /* Now qid is a genuine qdisc handle consistent with
           both parent and child.

           TC_H_MAJ(pid) may still be unspecified, complete it now.
         */
        if (pid)
            pid = TC_H_MAKE(qid, pid);
    } else {
        if (qid == 0)
            qid = dev->qdisc_sleeping->handle;
    }

    /* OK. Locate qdisc */
    if ((q = qdisc_lookup(dev, qid)) == NULL)
        return -ENOENT;

    /* And check that it supports classes */
    cops = q->ops->cl_ops;
    if (cops == NULL)
        return -EINVAL;

    /* Now try to get class */
    if (clid == 0) {
        if (pid == TC_H_ROOT)
            clid = qid;
    } else
        clid = TC_H_MAKE(qid, clid);

    if (clid)
        cl = cops->get(q, clid);

    if (cl == 0) {
        err = -ENOENT;
        if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
            goto out;
    } else {
        switch (n->nlmsg_type) {
        case RTM_NEWTCLASS:
            err = -EEXIST;
            if (n->nlmsg_flags & NLM_F_EXCL)
                goto out;
            break;
        case RTM_DELTCLASS:
            err = cops->delete(q, cl);
            if (err == 0)
                tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
            goto out;
        case RTM_GETTCLASS:
            err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
            goto out;
        default:
            err = -EINVAL;
            goto out;
        }
    }

    new_cl = cl;
    err = cops->change(q, clid, pid, tca, &new_cl);
    if (err == 0)
        tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
    if (cl)
        cops->put(q, cl);

    return err;
}
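
/*
 * Illustrative sketch (compiled out): the handle arithmetic behind the
 * addressing table above. tc writes handles in hex, so "1:10" means
 * major 0x0001, minor 0x0010.
 */
#if 0
static void example_handle_arithmetic(void)
{
    u32 clid  = TC_H_MAKE(0x00010000U, 0x10);  /* "1:10" == 0x00010010 */
    u32 qid   = TC_H_MAJ(clid);                /* 0x00010000: qdisc "1:" */
    u32 minor = TC_H_MIN(clid);                /* 0x00000010 */

    /* "parent 0:10" (major left unspecified by the user) is completed
     * against qid exactly as Step 1 above does: */
    u32 pid   = TC_H_MAKE(qid, 0x10);          /* 0x00010010 again */
}
#endif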
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
                          u32 pid, u32 seq, u16 flags, int event)
{
    struct tcmsg *tcm;
    struct nlmsghdr *nlh;
    unsigned char *b = skb_tail_pointer(skb);
    struct gnet_dump d;
    struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

    nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
    tcm = NLMSG_DATA(nlh);
    tcm->tcm_family = AF_UNSPEC;
    tcm->tcm__pad1 = 0;
    tcm->tcm__pad2 = 0;
    tcm->tcm_ifindex = q->dev->ifindex;
    tcm->tcm_parent = q->handle;
    tcm->tcm_handle = q->handle;
    tcm->tcm_info = 0;
    RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
    if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
        goto rtattr_failure;

    if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
                                     TCA_XSTATS, q->stats_lock, &d) < 0)
        goto rtattr_failure;

    if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
        goto rtattr_failure;

    if (gnet_stats_finish_copy(&d) < 0)
        goto rtattr_failure;

    nlh->nlmsg_len = skb_tail_pointer(skb) - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    nlmsg_trim(skb, b);
    return -1;
}
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event)
{
    struct sk_buff *skb;
    u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
        kfree_skb(skb);
        return -EINVAL;
    }

    return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args
{
    struct qdisc_walker     w;
    struct sk_buff          *skb;
    struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
    struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

    return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
                          a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
    int t;
    int s_t;
    struct net_device *dev;
    struct Qdisc *q;
    struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
    struct qdisc_dump_args arg;

    if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
        return 0;
    if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return 0;

    s_t = cb->args[0];
    t = 0;

    list_for_each_entry(q, &dev->qdisc_list, list) {
        if (t < s_t || !q->ops->cl_ops ||
            (tcm->tcm_parent &&
             TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
            t++;
            continue;
        }
        if (t > s_t)
            memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
        arg.w.fn = qdisc_class_dump;
        arg.skb = skb;
        arg.cb = cb;
        arg.w.stop = 0;
        arg.w.skip = cb->args[1];
        arg.w.count = 0;
        q->ops->cl_ops->walk(q, &arg.w);
        cb->args[1] = arg.w.count;
        if (arg.w.stop)
            break;
        t++;
    }

    cb->args[0] = t;

    dev_put(dev);
    return skb->len;
}
/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for protocol and asks
   specific classifiers.
 */
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
                struct tcf_result *res)
{
    int err = 0;
    __be16 protocol = skb->protocol;
#ifdef CONFIG_NET_CLS_ACT
    struct tcf_proto *otp = tp;
reclassify:
#endif
    protocol = skb->protocol;

    for ( ; tp; tp = tp->next) {
        if ((tp->protocol == protocol ||
             tp->protocol == htons(ETH_P_ALL)) &&
            (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
            if (TC_ACT_RECLASSIFY == err) {
                __u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
                tp = otp;

                if (MAX_REC_LOOP < verd++) {
                    printk("rule prio %d protocol %02x reclassify is buggy, packet dropped\n",
                           tp->prio & 0xffff, ntohs(tp->protocol));
                    return TC_ACT_SHOT;
                }
                skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
                goto reclassify;
            } else {
                if (skb->tc_verd)
                    skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
                return err;
            }
#else

            return err;
#endif
        }
    }
    return -1;
}
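
/*
 * Illustrative sketch (compiled out): how a classful qdisc's enqueue
 * path typically uses tc_classify() to map an skb onto one of its
 * classes, in the spirit of sch_prio. All example_* names are
 * hypothetical.
 */
#if 0
static int example_classify_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct tcf_result res;
    struct tcf_proto *fl = example_filter_list(sch);   /* hypothetical */

    switch (tc_classify(skb, fl, &res)) {
    case -1:
        /* no filter matched: fall back to a default class */
        return example_enqueue_default(skb, sch);
#ifdef CONFIG_NET_CLS_ACT
    case TC_ACT_SHOT:
        kfree_skb(skb);
        return NET_XMIT_DROP;
#endif
    default:
        /* res.classid names the target class */
        return example_enqueue_to_class(skb, sch, res.classid);
    }
}
#endif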
void tcf_destroy(struct tcf_proto *tp)
{
    tp->ops->destroy(tp);
    module_put(tp->ops->owner);
    kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto *fl)
{
    struct tcf_proto *tp;

    while ((tp = fl) != NULL) {
        fl = tp->next;
        tcf_destroy(tp);
    }
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
    struct timespec ts;

    hrtimer_get_res(CLOCK_MONOTONIC, &ts);
    seq_printf(seq, "%08x %08x %08x %08x\n",
               (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
               1000000,
               (u32)NSEC_PER_SEC / (u32)ktime_to_ns(timespec_to_ktime(ts)));

    return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
    return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
    .owner   = THIS_MODULE,
    .open    = psched_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
#endif
static int __init pktsched_init(void)
{
    register_qdisc(&pfifo_qdisc_ops);
    register_qdisc(&bfifo_qdisc_ops);
    proc_net_fops_create("psched", 0, &psched_fops);

    rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
    rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
    rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
    rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
    rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
    rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

    return 0;
}

subsys_initcall(pktsched_init);

EXPORT_SYMBOL(qdisc_get_rtab);
EXPORT_SYMBOL(qdisc_put_rtab);
EXPORT_SYMBOL(register_qdisc);
EXPORT_SYMBOL(unregister_qdisc);
EXPORT_SYMBOL(tc_classify);