/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that can enqueue packets and dequeue them (when the device
   is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs fall into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes"
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into
   a form more intelligible to the kernel, to perform sanity
   checks and the part of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means the
   discipline does not want to send anything right now.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not
   the real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   Like dequeue, but without removing a packet from the queue.

   ---reset

   Returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except statistics), etc.

   ---init

   Initializes a newly created qdisc.

   ---destroy

   Destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   Changes qdisc parameters.
 */
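
/* Illustrative sketch (not part of this file): a minimal qdisc honouring
 * the enqueue/dequeue contract described above, loosely modelled on
 * sch_fifo. The example_* names are hypothetical; qdisc_enqueue_tail(),
 * qdisc_dequeue_head() and qdisc_drop() are real helpers from
 * <net/sch_generic.h>.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */
	return qdisc_drop(skb, sch);			/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* NULL here need not mean "empty": only q->q.qlen == 0 does. */
	return qdisc_dequeue_head(sch);
}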

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL) {
			qops->peek = noop_qdisc_ops.peek;
		} else {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* We know handle. Find qdisc among all qdisc's attached to device
   (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

static void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
}

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (!s || tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(qdisc_calculate_pkt_len);

void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		printk(KERN_WARNING
		       "%s: %s qdisc %X: is non-work-conserving?\n",
		       txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach) {
			new->ops->attach(new);
			num_q = 0;
		}

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = &dev->rx_queue;

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						root_lock, tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(sch->stab);
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	qdisc_put_stab(sch->stab);
	sch->stab = stab;

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
903 struct qdisc_walker w
;
908 static int check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
);
910 static int check_loop(struct Qdisc
*q
, struct Qdisc
*p
, int depth
)
912 struct check_loop_arg arg
;
914 if (q
->ops
->cl_ops
== NULL
)
917 arg
.w
.stop
= arg
.w
.skip
= arg
.w
.count
= 0;
918 arg
.w
.fn
= check_loop_fn
;
921 q
->ops
->cl_ops
->walk(q
, &arg
.w
);
922 return arg
.w
.stop
? -ELOOP
: 0;

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /*ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue, p,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	rcu_read_lock();
	idx = 0;
	for_each_netdev_rcu(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = &dev->rx_queue;
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	rcu_read_unlock();

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/


static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
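
	/* Worked example (illustrative numbers, not from this file): for
	 * "tc class add dev eth0 parent 1: classid 1:10", userspace sends
	 * tcm_parent = 0x00010000 (1:0, the root class of qdisc 1:) and
	 * tcm_handle = 0x00010010 (1:10). TC_H_MAJ() of both is 0x00010000,
	 * so qid below resolves to qdisc 1:0. */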

	/* Step 1. Determine qdisc handle X:0 */

	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		   both with parent and child.

		   TC_H_MAJ(pid) still may be unspecified, complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
*root
, struct sk_buff
*skb
,
1548 struct tcmsg
*tcm
, struct netlink_callback
*cb
,
1556 if (tc_dump_tclass_qdisc(root
, skb
, tcm
, cb
, t_p
, s_t
) < 0)
1559 list_for_each_entry(q
, &root
->list
, list
) {
1560 if (tc_dump_tclass_qdisc(q
, skb
, tcm
, cb
, t_p
, s_t
) < 0)
1567 static int tc_dump_tclass(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1569 struct tcmsg
*tcm
= (struct tcmsg
*)NLMSG_DATA(cb
->nlh
);
1570 struct net
*net
= sock_net(skb
->sk
);
1571 struct netdev_queue
*dev_queue
;
1572 struct net_device
*dev
;
1575 if (cb
->nlh
->nlmsg_len
< NLMSG_LENGTH(sizeof(*tcm
)))
1577 if ((dev
= dev_get_by_index(net
, tcm
->tcm_ifindex
)) == NULL
)
1583 if (tc_dump_tclass_root(dev
->qdisc
, skb
, tcm
, cb
, &t
, s_t
) < 0)
1586 dev_queue
= &dev
->rx_queue
;
1587 if (tc_dump_tclass_root(dev_queue
->qdisc_sleeping
, skb
, tcm
, cb
, &t
, s_t
) < 0)

/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for protocol and asks
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			if (net_ratelimit())
				printk(KERN_NOTICE
				       "%s: packet reclassify loop"
				       " rule prio %u protocol %02x\n",
				       tp->q->ops->id,
				       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_net_fops_create(net, "psched", 0, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	proc_net_remove(net, "psched");
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		printk(KERN_ERR "pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);