/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   device is ready to send something) in order and at times
   determined by algorithm hidden in it.

   qdisc's are divided to two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets to "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   information supplied by user in the form of handles
   to more intelligible for kernel form, to make some sanity
   checks and part of work, which is common to all qdiscs
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.


   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that queue is empty, it just means that
   discipline does not want to send anything this time.
   Queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   real packet queue, but however q->q.qlen must be valid.

   ---enqueue

   enqueue returns 0, if packet was enqueued successfully.
   If packet (this one or another one) was dropped, it returns
   not zero error code.
   NET_XMIT_DROP	- this packet dropped
     Expected action: do not backoff, but wait until queue will clear.
   NET_XMIT_CN		- probably this packet enqueued, but another one dropped.
     Expected action: backoff or ignore
   NET_XMIT_POLICED	- dropped by police.
     Expected action: backoff or error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns qdisc to initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes newly created qdisc.

   ---destroy

   destroys resources allocated by init and during lifetime of qdisc.

   ---change

   changes qdisc parameters.
 */
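/*
 * Illustrative sketch only (not part of this file, compiled out): a minimal
 * FIFO-style qdisc wiring the routines described above into a Qdisc_ops.
 * The "example_*" names are hypothetical; the helpers used (qdisc_enqueue_tail,
 * qdisc_dequeue_head, qdisc_peek_head, qdisc_reshape_fail) are the generic
 * ones a simple work-conserving queue would typically rely on.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Room left: queue the packet and report success (0). */
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);

	/* Queue full: drop, typically reported as NET_XMIT_DROP. */
	return qdisc_reshape_fail(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* May return NULL; the queue is only really empty if q->q.qlen == 0. */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif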
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
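/*
 * Illustrative sketch only (not part of this file, compiled out): the usual
 * caller pattern for the two functions above.  A qdisc module registers its
 * ops on load and unregisters them on unload; "example_qdisc_ops" refers to
 * the hypothetical ops from the earlier sketch.
 */
#if 0
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif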
/* We know handle. Find qdisc among all qdisc's attached to device
   (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

static void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
}

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
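/*
 * Illustrative sketch only (not part of this file, compiled out): how a
 * shaping qdisc typically consumes the rate-table helpers above from its
 * ->change() path.  The function and parameter names are hypothetical; the
 * point is that the attribute carrying the table is handed to
 * qdisc_get_rtab() and the table is later released with qdisc_put_rtab().
 */
#if 0
static int example_setup_rate(struct Qdisc *sch, struct tc_ratespec *rate,
			      struct nlattr *rtab_attr)
{
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(rate, rtab_attr);
	if (rtab == NULL)
		return -EINVAL;

	/* ... stash rtab in the qdisc's private data; a previously
	 * installed table must be dropped with qdisc_put_rtab() ... */
	return 0;
}
#endif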
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		printk(KERN_WARNING
		       "%s: %s qdisc %X: is non-work-conserving?\n",
		       txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
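/*
 * Illustrative sketch only (not part of this file, compiled out): the common
 * watchdog pattern in a rate-limiting qdisc's ->dequeue().  All names are
 * hypothetical.  The watchdog would be set up with qdisc_watchdog_init() in
 * ->init() and cancelled with qdisc_watchdog_cancel() in ->reset()/->destroy();
 * when the head packet is not yet eligible, the qdisc arms the timer and
 * returns NULL, and qdisc_watchdog() above later reschedules the device.
 */
#if 0
struct example_shaper_data {
	struct qdisc_watchdog	watchdog;
	psched_time_t		next_send_time;
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);
	psched_time_t now = psched_get_time();

	if (now < q->next_send_time) {
		/* Not eligible yet: arm the timer, send nothing this time. */
		qdisc_watchdog_schedule(&q->watchdog, q->next_send_time);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif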
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
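/*
 * Illustrative sketch only (not part of this file, compiled out): how a
 * classful qdisc typically uses the class-hash helpers above.  "example_class"
 * and "example_add_class" are hypothetical.  Classes embed a
 * Qdisc_class_common keyed by classid; insertion happens under the tree lock
 * and the table is grown opportunistically afterwards (the 0.75 load-factor
 * check lives in qdisc_class_hash_grow() itself).
 */
#if 0
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash node */
	/* ... per-class state ... */
};

static int example_add_class(struct Qdisc *sch, struct Qdisc_class_hash *clhash,
			     u32 classid)
{
	struct example_class *cl = kzalloc(sizeof(*cl), GFP_KERNEL);

	if (cl == NULL)
		return -ENOBUFS;
	cl->common.classid = classid;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, clhash);
	return 0;
}
#endif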
/* Allocate an unique handle from space managed by kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach) {
			new->ops->attach(new);
			num_q = 0;
		}

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						root_lock, tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(sch->stab);
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	qdisc_put_stab(sch->stab);
	sch->stab = stab;

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
920 struct qdisc_walker w
;
925 static int check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
);
927 static int check_loop(struct Qdisc
*q
, struct Qdisc
*p
, int depth
)
929 struct check_loop_arg arg
;
931 if (q
->ops
->cl_ops
== NULL
)
934 arg
.w
.stop
= arg
.w
.skip
= arg
.w
.count
= 0;
935 arg
.w
.fn
= check_loop_fn
;
938 q
->ops
->cl_ops
->walk(q
, &arg
.w
);
939 return arg
.w
.stop
? -ELOOP
: 0;
943 check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
)
946 const struct Qdisc_class_ops
*cops
= q
->ops
->cl_ops
;
947 struct check_loop_arg
*arg
= (struct check_loop_arg
*)w
;
949 leaf
= cops
->leaf(q
, cl
);
951 if (leaf
== arg
->p
|| arg
->depth
> 7)
953 return check_loop(leaf
, arg
->p
, arg
->depth
+ 1);
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				if (dev_ingress_queue(dev))
					q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				if (dev_ingress_queue_create(dev))
					q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags&NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
->nlmsg_flags
&NLM_F_CREATE
) &&
1105 (n
->nlmsg_flags
&NLM_F_REPLACE
) &&
1106 ((n
->nlmsg_flags
&NLM_F_EXCL
) ||
1108 nla_strcmp(tca
[TCA_KIND
], q
->ops
->id
))))
1109 goto create_n_graft
;
1113 if (!tcm
->tcm_handle
)
1115 q
= qdisc_lookup(dev
, tcm
->tcm_handle
);
1118 /* Change qdisc parameters */
1121 if (n
->nlmsg_flags
&NLM_F_EXCL
)
1123 if (tca
[TCA_KIND
] && nla_strcmp(tca
[TCA_KIND
], q
->ops
->id
))
1125 err
= qdisc_change(q
, tca
);
1127 qdisc_notify(net
, skb
, n
, clid
, NULL
, q
);
1131 if (!(n
->nlmsg_flags
&NLM_F_CREATE
))
1133 if (clid
== TC_H_INGRESS
) {
1134 if (dev_ingress_queue(dev
))
1135 q
= qdisc_create(dev
, dev_ingress_queue(dev
), p
,
1136 tcm
->tcm_parent
, tcm
->tcm_parent
,
1141 struct netdev_queue
*dev_queue
;
1143 if (p
&& p
->ops
->cl_ops
&& p
->ops
->cl_ops
->select_queue
)
1144 dev_queue
= p
->ops
->cl_ops
->select_queue(p
, tcm
);
1146 dev_queue
= p
->dev_queue
;
1148 dev_queue
= netdev_get_tx_queue(dev
, 0);
1150 q
= qdisc_create(dev
, dev_queue
, p
,
1151 tcm
->tcm_parent
, tcm
->tcm_handle
,
1161 err
= qdisc_graft(dev
, p
, skb
, n
, clid
, q
, NULL
);
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
				      n->nlmsg_flags&NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	rcu_read_lock();
	idx = 0;
	for_each_netdev_rcu(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	rcu_read_unlock();

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
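	/*
	 * Illustrative example (not from the original source): a request such
	 * as "tc class add dev eth0 parent 1: classid 1:10 ..." arrives here
	 * with tcm_parent == 0x00010000 (parent X:0, the root class of qdisc
	 * 1:) and tcm_handle == 0x00010010 (handle X:Y; tc parses the minor
	 * in hex), so qid below resolves to the handle of qdisc 1:0.
	 */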
	/* Step 1. Determine qdisc handle X:0 */

	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		   both with parent and child.

		   TC_H_MAJ(pid) still may be unspecified, complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags&NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans classifier chain attached
   to this qdisc, (optionally) tests for protocol and asks
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			if (net_ratelimit())
				printk(KERN_NOTICE
				       "%s: packet reclassify loop"
				       " rule prio %u protocol %02x\n",
				       tp->q->ops->id,
				       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_net_fops_create(net, "psched", 0, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	proc_net_remove(net, "psched");
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		printk(KERN_ERR "pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);