/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         struct nlmsghdr *n, struct Qdisc *q,
                         unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. Queueing disciplines manager frontend.
   2. Traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work that is common to all
   qdiscs, and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   the real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If the packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP     - this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN       - probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore
   NET_XMIT_POLICED  - dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue, but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
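/*
 * Illustrative sketch (not part of the original file): a minimal
 * work-conserving qdisc wiring the routines described above into a
 * Qdisc_ops and registering it via register_qdisc() below.  All
 * "example_*" names are hypothetical; a real module would also set
 * .priv_size, implement ->init/->change as needed, and unregister
 * itself on module exit.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        /* Returns 0 (NET_XMIT_SUCCESS) on success and NET_XMIT_DROP when
         * the packet has to be dropped; see the return-code table above.
         */
        if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
                return qdisc_enqueue_tail(skb, sch);
        return qdisc_reshape_fail(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        /* NULL only means "nothing to send right now"; emptiness is
         * signalled by sch->q.qlen == 0.
         */
        return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
        .id             = "example",
        .enqueue        = example_enqueue,
        .dequeue        = example_dequeue,
        .peek           = qdisc_peek_head,
        .reset          = qdisc_reset_queue,
        .owner          = THIS_MODULE,
};

/* register_qdisc(&example_qdisc_ops) would then make "example" visible to tc. */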
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
        struct Qdisc_ops *q, **qp;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (!strcmp(qops->id, q->id))

        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
                if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;

                const struct Qdisc_class_ops *cops = qops->cl_ops;

                if (!(cops->get && cops->put && cops->walk && cops->leaf))

                if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))

        write_unlock(&qdisc_mod_lock);
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
        struct Qdisc_ops *q, **qp;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
        write_unlock(&qdisc_mod_lock);
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
        read_lock(&qdisc_mod_lock);
        strlcpy(name, default_qdisc_ops->id, len);
        read_unlock(&qdisc_mod_lock);

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
        struct Qdisc_ops *q = NULL;

        for (q = qdisc_base; q; q = q->next) {
                if (!strcmp(name, q->id)) {
                        if (!try_module_get(q->owner))

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
        const struct Qdisc_ops *ops;

        if (!capable(CAP_NET_ADMIN))

        write_lock(&qdisc_mod_lock);
        ops = qdisc_lookup_default(name);
                /* Not found, drop lock and try to load module */
                write_unlock(&qdisc_mod_lock);
                request_module("sch_%s", name);
                write_lock(&qdisc_mod_lock);

                ops = qdisc_lookup_default(name);

                /* Set new default */
                module_put(default_qdisc_ops->owner);
                default_qdisc_ops = ops;
        write_unlock(&qdisc_mod_lock);

        return ops ? 0 : -ENOENT;

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
        if (!(root->flags & TCQ_F_BUILTIN) &&
            root->handle == handle)

        list_for_each_entry(q, &root->list, list) {
                if (q->handle == handle)

static void qdisc_list_add(struct Qdisc *q)
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
                list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);

void qdisc_list_del(struct Qdisc *q)
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
EXPORT_SYMBOL(qdisc_list_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
        q = qdisc_match_from_root(dev->qdisc, handle);

        if (dev_ingress_queue(dev))
                q = qdisc_match_from_root(
                        dev_ingress_queue(dev)->qdisc_sleeping,

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
        const struct Qdisc_class_ops *cops = p->ops->cl_ops;

        cl = cops->get(p, classid);
        leaf = cops->leaf(p, cl);

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
        struct Qdisc_ops *q = NULL;

        read_lock(&qdisc_mod_lock);
        for (q = qdisc_base; q; q = q->next) {
                if (nla_strcmp(kind, q->id) == 0) {
                        if (!try_module_get(q->owner))
        read_unlock(&qdisc_mod_lock);
/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To keep backward compatibility with older iproute2 tc
 * utils, we detect the linklayer setting by detecting whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
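/*
 * Worked example (added for illustration): with mpu = 0 and cell_log = 3,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, so cell_low = 0
 * and cell_high = (48 >> 3) - 1 = 5.  Entries 0..5 of the rate table all
 * describe packet sizes that fit one 48-byte ATM cell; if rtab[0] == rtab[5]
 * the table was built for the ATM linklayer, otherwise it is treated as
 * Ethernet.
 */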
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
        int low       = roundup(r->mpu, 48);
        int high      = roundup(low+1, 48);
        int cell_low  = low >> r->cell_log;
        int cell_high = (high >> r->cell_log) - 1;

        /* rtab is too inaccurate at rates > 100Mbit/s */
        if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
                pr_debug("TC linklayer: Giving up ATM detection\n");
                return TC_LINKLAYER_ETHERNET;

        if ((cell_high > cell_low) && (cell_high < 256)
            && (rtab[cell_low] == rtab[cell_high])) {
                pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
                         cell_low, cell_high, rtab[cell_high]);
                return TC_LINKLAYER_ATM;
        return TC_LINKLAYER_ETHERNET;

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
        struct qdisc_rate_table *rtab;

        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
            nla_len(tab) != TC_RTAB_SIZE)

        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
                    !memcmp(&rtab->data, nla_data(tab), 1024)) {

        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
                memcpy(rtab->data, nla_data(tab), 1024);
                if (r->linklayer == TC_LINKLAYER_UNAWARE)
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
        struct qdisc_rate_table *rtab, **rtabp;

        if (!tab || --tab->refcnt)

        for (rtabp = &qdisc_rtab_list;
             (rtab = *rtabp) != NULL;
             rtabp = &rtab->next) {
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
        [TCA_STAB_DATA] = { .type = NLA_BINARY },

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
        struct tc_sizespec *s;
        unsigned int tsize = 0;

        err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);

        if (!tb[TCA_STAB_BASE])
                return ERR_PTR(-EINVAL);

        s = nla_data(tb[TCA_STAB_BASE]);

                if (!tb[TCA_STAB_DATA])
                        return ERR_PTR(-EINVAL);
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);

        if (tsize != s->tsize || (!tab && tsize > 0))
                return ERR_PTR(-EINVAL);

        spin_lock(&qdisc_stab_lock);

        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
                if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                spin_unlock(&qdisc_stab_lock);

        spin_unlock(&qdisc_stab_lock);

        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        memcpy(stab->data, tab, tsize * sizeof(u16));

        spin_lock(&qdisc_stab_lock);
        list_add_tail(&stab->list, &qdisc_stab_list);
        spin_unlock(&qdisc_stab_lock);
static void stab_kfree_rcu(struct rcu_head *head)
        kfree(container_of(head, struct qdisc_size_table, rcu));

void qdisc_put_stab(struct qdisc_size_table *tab)
        spin_lock(&qdisc_stab_lock);

        if (--tab->refcnt == 0) {
                list_del(&tab->list);
                call_rcu_bh(&tab->rcu, stab_kfree_rcu);

        spin_unlock(&qdisc_stab_lock);
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
        nest = nla_nest_start(skb, TCA_STAB);
                goto nla_put_failure;
        if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
        pkt_len = skb->len + stab->szopts.overhead;
        if (unlikely(!stab->szopts.tsize))

        slot = pkt_len + stab->szopts.cell_align;
        if (unlikely(slot < 0))

        slot >>= stab->szopts.cell_log;
        if (likely(slot < stab->szopts.tsize))
                pkt_len = stab->data[slot];
                pkt_len = stab->data[stab->szopts.tsize - 1] *
                                (slot / stab->szopts.tsize) +
                                stab->data[slot % stab->szopts.tsize];

        pkt_len <<= stab->szopts.size_log;
        if (unlikely(pkt_len < 1))
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);

void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
                pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
                        txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,

        qdisc_unthrottled(wd->qdisc);
        __netif_schedule(qdisc_root(wd->qdisc));

        return HRTIMER_NORESTART;

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
        hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        wd->timer.function = qdisc_watchdog;
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))

        qdisc_throttled(wd->qdisc);

        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
        hrtimer_cancel(&wd->timer);
        qdisc_unthrottled(wd->qdisc);
EXPORT_SYMBOL(qdisc_watchdog_cancel);
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
        unsigned int size = n * sizeof(struct hlist_head), i;
        struct hlist_head *h;

        if (size <= PAGE_SIZE)
                h = kmalloc(size, GFP_KERNEL);
                h = (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL, get_order(size));

                for (i = 0; i < n; i++)
                        INIT_HLIST_HEAD(&h[i]);

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
        unsigned int size = n * sizeof(struct hlist_head);

        if (size <= PAGE_SIZE)
                free_pages((unsigned long)h, get_order(size));

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
        struct Qdisc_class_common *cl;
        struct hlist_node *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;

        /* Rehash when load factor exceeds 0.75 */
        if (clhash->hashelems * 4 <= clhash->hashsize * 3)
        nsize = clhash->hashsize * 2;

        nhash = qdisc_class_hash_alloc(nsize);

        ohash = clhash->hash;
        osize = clhash->hashsize;

        for (i = 0; i < osize; i++) {
                hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
        clhash->hash     = nhash;
        clhash->hashsize = nsize;
        clhash->hashmask = nmask;
        sch_tree_unlock(sch);

        qdisc_class_hash_free(ohash, osize);
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
        unsigned int size = 4;

        clhash->hash = qdisc_class_hash_alloc(size);
        if (clhash->hash == NULL)
        clhash->hashsize  = size;
        clhash->hashmask  = size - 1;
        clhash->hashelems = 0;
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
        qdisc_class_hash_free(clhash->hash, clhash->hashsize);
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
        INIT_HLIST_NODE(&cl->hnode);
        h = qdisc_class_hash(cl->classid, clhash->hashmask);
        hlist_add_head(&cl->hnode, &clhash->hash[h]);
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
        hlist_del(&cl->hnode);
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
                if (!qdisc_lookup(dev, autohandle))
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        const struct Qdisc_class_ops *cops;

        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))

                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                        WARN_ON(parentid != TC_H_ROOT);

                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
                        cl = cops->get(sch, parentid);
                        cops->qlen_notify(sch, cl);
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
                               struct Qdisc *old, struct Qdisc *new)
        qdisc_notify(net, skb, n, clid, old, new);
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
                       struct Qdisc *new, struct Qdisc *old)
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);

        if (parent == NULL) {
                unsigned int i, num_q, ingress;

                num_q = dev->num_tx_queues;
                if ((q && q->flags & TCQ_F_INGRESS) ||
                    (new && new->flags & TCQ_F_INGRESS)) {
                        if (!dev_ingress_queue(dev))

                if (dev->flags & IFF_UP)

                if (new && new->ops->attach) {
                        new->ops->attach(new);

                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);

                                dev_queue = netdev_get_tx_queue(dev, i);

                        old = dev_graft_qdisc(dev_queue, new);
                                atomic_inc(&new->refcnt);

                        notify_and_destroy(net, skb, n, classid,
                        if (new && !new->ops->attach)
                                atomic_inc(&new->refcnt);
                        dev->qdisc = new ? : &noop_qdisc;

                        notify_and_destroy(net, skb, n, classid, old, new);

                if (dev->flags & IFF_UP)
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

                if (cops && cops->graft) {
                        unsigned long cl = cops->get(parent, classid);

                                err = cops->graft(parent, cl, new, &old);
                                cops->put(parent, cl);
                        notify_and_destroy(net, skb, n, classid, old, new);
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
             struct Qdisc *p, u32 parent, u32 handle,
             struct nlattr **tca, int *errp)
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc_ops *ops;
        struct qdisc_size_table *stab;

        ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
        if (ops == NULL && kind != NULL) {
                if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
                        /* We dropped the RTNL semaphore in order to
                         * perform the module load.  So, even if we
                         * succeeded in loading the module we have to
                         * tell the caller to replay the request.  We
                         * indicate this using -EAGAIN.
                         * We replay the request because the device may
                         * go away in the mean time.
                         */
                        request_module("sch_%s", name);
                        ops = qdisc_lookup_ops(kind);
                                /* We will try again qdisc_lookup_ops,
                                 * so don't keep a reference.
                                 */
                                module_put(ops->owner);

        sch = qdisc_alloc(dev_queue, ops);

        sch->parent = parent;

        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
                lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
                        handle = qdisc_alloc_handle(dev);
                lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
                if (!netif_is_multiqueue(dev))
                        sch->flags |= TCQ_F_ONETXQUEUE;

        sch->handle = handle;

        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
                        stab = qdisc_get_stab(tca[TCA_STAB]);
                        rcu_assign_pointer(sch->stab, stab);
                        spinlock_t *root_lock;

                        if (sch->flags & TCQ_F_MQROOT)

                        if ((sch->parent != TC_H_ROOT) &&
                            !(sch->flags & TCQ_F_INGRESS) &&
                            (!p || !(p->flags & TCQ_F_MQROOT)))
                                root_lock = qdisc_root_sleeping_lock(sch);
                                root_lock = qdisc_lock(sch);

                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
                                                root_lock, tca[TCA_RATE]);

        kfree((char *) sch - sch->padded);
        module_put(ops->owner);

        /*
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
         */
        qdisc_put_stab(rtnl_dereference(sch->stab));
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
        struct qdisc_size_table *ostab, *stab = NULL;

        if (tca[TCA_OPTIONS]) {
                if (sch->ops->change == NULL)
                err = sch->ops->change(sch, tca[TCA_OPTIONS]);

        if (tca[TCA_STAB]) {
                stab = qdisc_get_stab(tca[TCA_STAB]);
                        return PTR_ERR(stab);

        ostab = rtnl_dereference(sch->stab);
        rcu_assign_pointer(sch->stab, stab);
        qdisc_put_stab(ostab);

        if (tca[TCA_RATE]) {
                /* NB: ignores errors from replace_estimator
                   because change can't be undone. */
                if (sch->flags & TCQ_F_MQROOT)
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
                                      qdisc_root_sleeping_lock(sch),

struct check_loop_arg {
        struct qdisc_walker     w;

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
        struct check_loop_arg   arg;

        if (q->ops->cl_ops == NULL)

        arg.w.stop = arg.w.skip = arg.w.count = 0;
        arg.w.fn = check_loop_fn;
        q->ops->cl_ops->walk(q, &arg.w);
        return arg.w.stop ? -ELOOP : 0;

check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct check_loop_arg *arg = (struct check_loop_arg *)w;

        leaf = cops->leaf(q, cl);
                if (leaf == arg->p || arg->depth > 7)
                return check_loop(leaf, arg->p, arg->depth + 1);
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q = NULL;
        struct Qdisc *p = NULL;

        if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))

        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);

        clid = tcm->tcm_parent;

        if (clid != TC_H_ROOT) {
                if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                        p = qdisc_lookup(dev, TC_H_MAJ(clid));
                        q = qdisc_leaf(p, clid);
                } else if (dev_ingress_queue(dev)) {
                        q = dev_ingress_queue(dev)->qdisc_sleeping;

                if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
                q = qdisc_lookup(dev, tcm->tcm_handle);

        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))

        if (n->nlmsg_type == RTM_DELQDISC) {
                err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
                qdisc_notify(net, skb, n, clid, NULL, q);

/*
 * Create/change qdisc.
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q, *p;

        if (!capable(CAP_NET_ADMIN))

        /* Reinit, just in case something touches this. */
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);

        tcm = nlmsg_data(n);
        clid = tcm->tcm_parent;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);

        if (clid != TC_H_ROOT) {
                if (clid != TC_H_INGRESS) {
                        p = qdisc_lookup(dev, TC_H_MAJ(clid));
                        q = qdisc_leaf(p, clid);
                } else if (dev_ingress_queue_create(dev)) {
                        q = dev_ingress_queue(dev)->qdisc_sleeping;

        /* It may be default qdisc, ignore it */
        if (q && q->handle == 0)

        if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                if (tcm->tcm_handle) {
                        if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
                        if (TC_H_MIN(tcm->tcm_handle))
                        q = qdisc_lookup(dev, tcm->tcm_handle);
                                goto create_n_graft;
                        if (n->nlmsg_flags & NLM_F_EXCL)
                        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                            (p && check_loop(q, p, 0)))
                        atomic_inc(&q->refcnt);
                                goto create_n_graft;
                        /* This magic test requires explanation.
                         *
                         *   We know that some child q is already
                         *   attached to this parent and we have a choice:
                         *   either to change it or to create/graft a new one.
                         *
                         *   1. We are allowed to create/graft only
                         *   if CREATE and REPLACE flags are set.
                         *
                         *   2. If EXCL is set, the requestor wanted to say
                         *   that the qdisc tcm_handle is not expected
                         *   to exist, so we choose create/graft too.
                         *
                         *   3. The last case is when no flags are set.
                         *   Alas, it is sort of a hole in the API; we
                         *   cannot decide what to do unambiguously.
                         *   For now we select create/graft if the
                         *   user gave a KIND which does not match the existing one.
                         */
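                        /* For reference (added note, not from the original source):
                         * iproute2's tc maps its verbs onto these netlink flags
                         * roughly as "tc qdisc add" = NLM_F_CREATE | NLM_F_EXCL,
                         * "tc qdisc change" = no flags, and "tc qdisc replace" =
                         * NLM_F_CREATE | NLM_F_REPLACE, which is what the test
                         * below distinguishes.
                         */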
                        if ((n->nlmsg_flags & NLM_F_CREATE) &&
                            (n->nlmsg_flags & NLM_F_REPLACE) &&
                            ((n->nlmsg_flags & NLM_F_EXCL) ||
                              nla_strcmp(tca[TCA_KIND], q->ops->id))))
                                goto create_n_graft;

                if (!tcm->tcm_handle)
                q = qdisc_lookup(dev, tcm->tcm_handle);

        /* Change qdisc parameters */

        if (n->nlmsg_flags & NLM_F_EXCL)
        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
        err = qdisc_change(q, tca);
                qdisc_notify(net, skb, n, clid, NULL, q);

        if (!(n->nlmsg_flags & NLM_F_CREATE))
        if (clid == TC_H_INGRESS) {
                if (dev_ingress_queue(dev))
                        q = qdisc_create(dev, dev_ingress_queue(dev), p,
                                         tcm->tcm_parent, tcm->tcm_parent,
                struct netdev_queue *dev_queue;

                if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
                        dev_queue = p->ops->cl_ops->select_queue(p, tcm);
                        dev_queue = p->dev_queue;
                        dev_queue = netdev_get_tx_queue(dev, 0);

                q = qdisc_create(dev, dev_queue, p,
                                 tcm->tcm_parent, tcm->tcm_handle,

        err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 portid, u32 seq, u16 flags, int event)
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct qdisc_size_table *stab;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;

        stab = rtnl_dereference(q->stab);
        if (stab && qdisc_dump_stab(skb, stab) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;

        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
            gnet_stats_copy_queue(&d, &q->qstats) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
        return (q->flags & TCQ_F_BUILTIN) ? true : false;

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new)
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

        if (old && !tc_qdisc_dump_ignore(old)) {
                if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
        if (new && !tc_qdisc_dump_ignore(new)) {
                if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)

                return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                      n->nlmsg_flags & NLM_F_ECHO);
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                              struct netlink_callback *cb,
                              int *q_idx_p, int s_q_idx)
        int ret = 0, q_idx = *q_idx_p;

        if (q_idx < s_q_idx) {
                if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
        list_for_each_entry(q, &root->list, list) {
                if (q_idx < s_q_idx) {
                if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;

        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];

        for_each_netdev_rcu(net, dev) {
                struct netdev_queue *dev_queue;

                if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)

                dev_queue = dev_ingress_queue(dev);
                    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
                                       &q_idx, s_q_idx) < 0)

        cb->args[1] = q_idx;



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q = NULL;
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;

        if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))

        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        /*
           parent == TC_H_UNSPEC - unspecified parent.
           parent == TC_H_ROOT   - class is root, which has no parent.
           parent == X:0         - parent is root class.
           parent == X:Y         - parent is a node in hierarchy.
           parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

           handle == 0:0         - generate handle from kernel pool.
           handle == 0:Y         - class is X:Y, where X:0 is qdisc.
           handle == X:Y         - clear.
           handle == X:0         - root class.
         */
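        /*
         * Worked example (added for illustration): a tc classid "X:Y" is the
         * 32-bit value TC_H_MAKE(X << 16, Y), with X and Y written in hex.
         * So "1:10" is 0x00010010; TC_H_MAJ() extracts 0x00010000 (the qdisc
         * "1:") and TC_H_MIN() extracts 0x10 (the class within it).
         */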
        /* Step 1. Determine qdisc handle X:0 */

        portid = tcm->tcm_parent;
        clid = tcm->tcm_handle;
        qid = TC_H_MAJ(clid);

        if (portid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(portid);

                        /* If both majors are known, they must be identical. */
                } else if (qid == 0)
                        qid = dev->qdisc->handle;

                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
                 *
                 * TC_H_MAJ(portid) still may be unspecified, complete it now.
                 */
                        portid = TC_H_MAKE(qid, portid);
                        qid = dev->qdisc->handle;

        /* OK. Locate qdisc */
        q = qdisc_lookup(dev, qid);

        /* And check that it supports classes */
        cops = q->ops->cl_ops;

        /* Now try to get class */
                if (portid == TC_H_ROOT)
                clid = TC_H_MAKE(qid, clid);

                cl = cops->get(q, clid);

                if (n->nlmsg_type != RTM_NEWTCLASS ||
                    !(n->nlmsg_flags & NLM_F_CREATE))

                switch (n->nlmsg_type) {
                        if (n->nlmsg_flags & NLM_F_EXCL)
                                err = cops->delete(q, cl);
                                tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
                        err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);

                err = cops->change(q, clid, portid, tca, &new_cl);
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          u32 portid, u32 seq, u16 flags, int event)
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;

        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;

static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         struct nlmsghdr *n, struct Qdisc *q,
                         unsigned long cl, int event)
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

        if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {

        return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);

struct qdisc_dump_args {
        struct qdisc_walker     w;
        struct sk_buff          *skb;
        struct netlink_callback *cb;
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

        return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
                                struct tcmsg *tcm, struct netlink_callback *cb,
        struct qdisc_dump_args arg;

        if (tc_qdisc_dump_ignore(q) ||
            *t_p < s_t || !q->ops->cl_ops ||
             TC_H_MAJ(tcm->tcm_parent) != q->handle)) {

                memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
        arg.w.fn = qdisc_class_dump;
        arg.w.skip = cb->args[1];
        q->ops->cl_ops->walk(q, &arg.w);
        cb->args[1] = arg.w.count;

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
                               struct tcmsg *tcm, struct netlink_callback *cb,
        if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)

        list_for_each_entry(q, &root->list, list) {
                if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        struct tcmsg *tcm = nlmsg_data(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct net_device *dev;

        if (nlmsg_len(cb->nlh) < sizeof(*tcm))

        dev = dev_get_by_index(net, tcm->tcm_ifindex);

        if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)

        dev_queue = dev_ingress_queue(dev);
            tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
        __be16 protocol = skb->protocol;

        for (; tp; tp = tp->next) {
                if (tp->protocol != protocol &&
                    tp->protocol != htons(ETH_P_ALL))
                err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
                        if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
                                skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                struct tcf_result *res)
#ifdef CONFIG_NET_CLS_ACT
        const struct tcf_proto *otp = tp;

        err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
        if (err == TC_ACT_RECLASSIFY) {
                u32 verd = G_TC_VERD(skb->tc_verd);

                if (verd++ >= MAX_REC_LOOP) {
                        net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
                                               ntohs(tp->protocol));
                skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
EXPORT_SYMBOL(tc_classify);
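/*
 * Illustrative sketch (not part of the original file): how a classful
 * qdisc's enqueue path typically consumes tc_classify().  The
 * "example_sched_data" struct and its "filter_list" member are
 * hypothetical stand-ins for the qdisc's own private data.
 */
struct example_sched_data {
        struct tcf_proto *filter_list;
};

static u32 example_classify(struct sk_buff *skb, struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;

        /* Walk the attached filter chain; on a match res.classid names the
         * class to enqueue to.  A real qdisc would additionally handle the
         * TC_ACT_* verdicts under CONFIG_NET_CLS_ACT.
         */
        if (tc_classify(skb, q->filter_list, &res) >= 0)
                return res.classid;

        return 0;       /* unclassified; the caller picks a default class */
}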
void tcf_destroy(struct tcf_proto *tp)
        tp->ops->destroy(tp);
        module_put(tp->ops->owner);

void tcf_destroy_chain(struct tcf_proto **fl)
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
        hrtimer_get_res(CLOCK_MONOTONIC, &ts);
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

static int psched_open(struct inode *inode, struct file *file)
        return single_open(file, psched_show, NULL);

static const struct file_operations psched_fops = {
        .owner          = THIS_MODULE,
        .open           = psched_open,
        .llseek         = seq_lseek,
        .release        = single_release,

static int __net_init psched_net_init(struct net *net)
        struct proc_dir_entry *e;

        e = proc_create("psched", 0, net->proc_net, &psched_fops);

static void __net_exit psched_net_exit(struct net *net)
        remove_proc_entry("psched", net->proc_net);
#else
static int __net_init psched_net_init(struct net *net)

static void __net_exit psched_net_exit(struct net *net)
#endif

static struct pernet_operations psched_net_ops = {
        .init = psched_net_init,
        .exit = psched_net_exit,

static int __init pktsched_init(void)
        err = register_pernet_subsys(&psched_net_ops);
                pr_err("pktsched_init: "
                       "cannot initialize per netns operations\n");

        register_qdisc(&pfifo_fast_ops);
        register_qdisc(&pfifo_qdisc_ops);
        register_qdisc(&bfifo_qdisc_ops);
        register_qdisc(&pfifo_head_drop_qdisc_ops);
        register_qdisc(&mq_qdisc_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
        rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

subsys_initcall(pktsched_init);