/*
 * net/sched/sch_generic.c     Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>
/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data participating in scheduling
   must additionally be protected with the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock)
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (__qdisc_destroy rcu-callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
        write_lock_bh(&qdisc_tree_lock);
        spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
        spin_unlock_bh(&dev->queue_lock);
        write_unlock_bh(&qdisc_tree_lock);
}
/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */
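
/*
 * Illustrative sketch (not additional kernel code): qdisc_restart() below
 * honours this rule by never blocking on one lock while it holds the other.
 * Roughly:
 *
 *      // entered with dev->queue_lock held
 *      if (!spin_trylock(&dev->xmit_lock))
 *              ...requeue the packet and try again later...
 *      spin_unlock(&dev->queue_lock);
 *      dev->hard_start_xmit(skb, dev);
 *      spin_unlock(&dev->xmit_lock);
 *      spin_lock(&dev->queue_lock);
 */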
/* Kick the device: dequeue one packet and hand it to the driver.

   Note that this procedure can be called by a watchdog timer, so that
   we do not check the dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
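
/*
 * Hypothetical usage sketch (the real caller lives in the core networking
 * code, not in this file): the return convention above lets a caller spin
 * on qdisc_restart() until the queue drains or the device throttles, e.g.
 *
 *      while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 *              ;       // keep pulling packets while the device accepts them
 */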
int qdisc_restart(struct net_device *dev)
{
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb;

        /* Dequeue packet */
        if ((skb = q->dequeue(q)) != NULL) {
                unsigned nolock = (dev->features & NETIF_F_LLTX);
                /*
                 * When the driver has LLTX set it does its own locking
                 * in start_xmit. No need to add additional overhead by
                 * locking again. These checks are worth it because
                 * even uncongested locks can be quite expensive.
                 * The driver can do trylock like here too, in case
                 * of lock congestion it should return -1 and the packet
                 * will be requeued.
                 */
                if (!nolock) {
                        if (!spin_trylock(&dev->xmit_lock)) {
                        collision:
                                /* So, someone grabbed the driver. */

                                /* It may be a transient configuration error,
                                   when hard_start_xmit() recurses. We detect
                                   it by checking the xmit owner and drop the
                                   packet when a deadloop is detected.
                                 */
                                if (dev->xmit_lock_owner == smp_processor_id()) {
                                        kfree_skb(skb);
                                        printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
                                        return -1;
                                }
                                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                                goto requeue;
                        }
                        /* Remember that the driver is grabbed by us. */
                        dev->xmit_lock_owner = smp_processor_id();
                }

                {
                        /* And release queue */
                        spin_unlock(&dev->queue_lock);

                        if (!netif_queue_stopped(dev)) {
                                int ret;

                                dev_queue_xmit_nit(skb, dev);

                                ret = dev->hard_start_xmit(skb, dev);
                                if (ret == NETDEV_TX_OK) {
                                        if (!nolock) {
                                                dev->xmit_lock_owner = -1;
                                                spin_unlock(&dev->xmit_lock);
                                        }
                                        spin_lock(&dev->queue_lock);
                                        return -1;
                                }
                                if (ret == NETDEV_TX_LOCKED && nolock) {
                                        spin_lock(&dev->queue_lock);
                                        goto collision;
                                }
                        }

                        /* NETDEV_TX_BUSY - we need to requeue */
                        /* Release the driver */
                        if (!nolock) {
                                dev->xmit_lock_owner = -1;
                                spin_unlock(&dev->xmit_lock);
                        }
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
                }

                /* Device kicked us out :(
                   This is possible in three cases:

                   1. fastroute is enabled
                   2. device cannot determine busy state
                      before start of transmission (f.e. dialout)
                   3. device is buggy (ppp)
                 */

        requeue:
                q->ops->requeue(skb, q);
                netif_schedule(dev);
                return 1;
        }
        BUG_ON((int) q->q.qlen < 0);
        return q->q.qlen;
}
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        spin_lock(&dev->xmit_lock);
        if (dev->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        if (netif_queue_stopped(dev) &&
                            (jiffies - dev->trans_start) > dev->watchdog_timeo) {
                                printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
                                dev->tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                                dev_hold(dev);
                }
        }
        spin_unlock(&dev->xmit_lock);

        dev_put(dev);
}
static void dev_watchdog_init(struct net_device *dev)
{
        init_timer(&dev->watchdog_timer);
        dev->watchdog_timer.data = (unsigned long)dev;
        dev->watchdog_timer.function = dev_watchdog;
}
void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                        dev_hold(dev);
        }
}
static void dev_watchdog_up(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        __netdev_watchdog_up(dev);
        spin_unlock_bh(&dev->xmit_lock);
}
static void dev_watchdog_down(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);   /* drop the reference taken when the timer was armed */
        spin_unlock_bh(&dev->xmit_lock);
}
241 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
242 under all circumstances. It is difficult to invent anything faster or
247 noop_enqueue(struct sk_buff
*skb
, struct Qdisc
* qdisc
)
253 static struct sk_buff
*
254 noop_dequeue(struct Qdisc
* qdisc
)
260 noop_requeue(struct sk_buff
*skb
, struct Qdisc
* qdisc
)
263 printk(KERN_DEBUG
"%s deferred output. It is buggy.\n", skb
->dev
->name
);
268 struct Qdisc_ops noop_qdisc_ops
= {
273 .enqueue
= noop_enqueue
,
274 .dequeue
= noop_dequeue
,
275 .requeue
= noop_requeue
,
276 .owner
= THIS_MODULE
,
279 struct Qdisc noop_qdisc
= {
280 .enqueue
= noop_enqueue
,
281 .dequeue
= noop_dequeue
,
282 .flags
= TCQ_F_BUILTIN
,
283 .ops
= &noop_qdisc_ops
,
284 .list
= LIST_HEAD_INIT(noop_qdisc
.list
),
static struct Qdisc_ops noqueue_qdisc_ops = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .requeue        =       noop_requeue,
        .owner          =       THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noqueue_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noqueue_qdisc.list),
};
static const u8 prio2band[TC_PRIO_MAX+1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
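
/*
 * Illustrative example (not extra code): the low four bits of skb->priority
 * (skb->priority & TC_PRIO_MAX) index this table, so e.g. TC_PRIO_BESTEFFORT
 * (0) maps to band 1, TC_PRIO_INTERACTIVE (6) to band 0 (served first by
 * pfifo_fast_dequeue below) and TC_PRIO_BULK (2) to band 2 (served last).
 */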
/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */
static int
pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);

        list += prio2band[skb->priority & TC_PRIO_MAX];

        if (list->qlen < qdisc->dev->tx_queue_len) {
                __skb_queue_tail(list, skb);
                qdisc->q.qlen++;
                qdisc->bstats.bytes += skb->len;
                qdisc->bstats.packets++;
                return 0;
        }
        qdisc->qstats.drops++;
        kfree_skb(skb);
        return NET_XMIT_DROP;
}
static struct sk_buff *
pfifo_fast_dequeue(struct Qdisc * qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);
        struct sk_buff *skb;

        for (prio = 0; prio < 3; prio++, list++) {
                skb = __skb_dequeue(list);
                if (skb) {
                        qdisc->q.qlen--;
                        return skb;
                }
        }
        return NULL;
}
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc * qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);

        list += prio2band[skb->priority & TC_PRIO_MAX];

        __skb_queue_head(list, skb);
        qdisc->q.qlen++;
        qdisc->qstats.requeues++;
        return 0;
}
static void
pfifo_fast_reset(struct Qdisc * qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < 3; prio++)
                skb_queue_purge(list + prio);
        qdisc->q.qlen = 0;
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        unsigned char *b = skb->tail;
        struct tc_prio_qopt opt;

        opt.bands = 3;
        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
        RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}
static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
        int i;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (i = 0; i < 3; i++)
                skb_queue_head_init(list + i);

        return 0;
}
static struct Qdisc_ops pfifo_fast_ops = {
        .priv_size      =       3 * sizeof(struct sk_buff_head),
        .enqueue        =       pfifo_fast_enqueue,
        .dequeue        =       pfifo_fast_dequeue,
        .requeue        =       pfifo_fast_requeue,
        .init           =       pfifo_fast_init,
        .reset          =       pfifo_fast_reset,
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
};
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        int size;

        /* ensure that the Qdisc and the private data are 32-byte aligned */
        size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
        size += ops->priv_size + QDISC_ALIGN_CONST;

        p = kmalloc(size, GFP_KERNEL);
        if (!p)
                return NULL;
        memset(p, 0, size);

        sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
                               & ~QDISC_ALIGN_CONST);
        sch->padded = (char *)sch - (char *)p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);
        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        kfree(p);
        return NULL;
}
/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);
}
/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
        struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
        write_lock(&qdisc_tree_lock);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);
        write_unlock(&qdisc_tree_lock);
        module_put(ops->owner);

        kfree((char *) qdisc - qdisc->padded);
}
/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
        struct list_head cql = LIST_HEAD_INIT(cql);
        struct Qdisc *cq, *q, *n;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

        if (!list_empty(&qdisc->list)) {
                if (qdisc->ops->cl_ops == NULL)
                        list_del(&qdisc->list);
                else
                        list_move(&qdisc->list, &cql);
        }

        /* unlink inner qdiscs from dev->qdisc_list immediately */
        list_for_each_entry(cq, &cql, list)
                list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
                        if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
                                if (q->ops->cl_ops == NULL)
                                        list_del_init(&q->list);
                                else
                                        list_move_tail(&q->list, &cql);
                        }
        list_for_each_entry_safe(cq, n, &cql, list)
                list_del_init(&cq->list);

        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
void dev_activate(struct net_device *dev)
{
        /* No queueing discipline is attached to device;
           create a default one, i.e. pfifo_fast for devices
           which need queueing and noqueue_qdisc for
           virtual interfaces.
         */

        if (dev->qdisc_sleeping == &noop_qdisc) {
                struct Qdisc *qdisc;
                if (dev->tx_queue_len) {
                        qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
                        if (qdisc == NULL) {
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
                        write_lock_bh(&qdisc_tree_lock);
                        list_add_tail(&qdisc->list, &dev->qdisc_list);
                        write_unlock_bh(&qdisc_tree_lock);
                } else {
                        qdisc = &noqueue_qdisc;
                }
                write_lock_bh(&qdisc_tree_lock);
                dev->qdisc_sleeping = qdisc;
                write_unlock_bh(&qdisc_tree_lock);
        }

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        spin_lock_bh(&dev->queue_lock);
        rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
        if (dev->qdisc != &noqueue_qdisc) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
        spin_unlock_bh(&dev->queue_lock);
}
void dev_deactivate(struct net_device *dev)
{
        struct Qdisc *qdisc;

        spin_lock_bh(&dev->queue_lock);
        qdisc = dev->qdisc;
        dev->qdisc = &noop_qdisc;

        qdisc_reset(qdisc);

        spin_unlock_bh(&dev->queue_lock);

        dev_watchdog_down(dev);

        /* Wait until the device is no longer scheduled for TX softirq work */
        while (test_bit(__LINK_STATE_SCHED, &dev->state))
                yield();

        spin_unlock_wait(&dev->xmit_lock);
}
void dev_init_scheduler(struct net_device *dev)
{
        qdisc_lock_tree(dev);
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        INIT_LIST_HEAD(&dev->qdisc_list);
        qdisc_unlock_tree(dev);

        dev_watchdog_init(dev);
}
void dev_shutdown(struct net_device *dev)
{
        struct Qdisc *qdisc;

        qdisc_lock_tree(dev);
        qdisc = dev->qdisc_sleeping;
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
                dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
#endif
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
}
EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);