/*
 *			Simple traffic shaper for Linux NET3.
 *
 *	(c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *				http://www.redhat.com
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *	warranty for any of this software. This material is provided
 *	"AS-IS" and at no charge.
 *
 *	Algorithm:
 *
 *	Queue Frame:
 *		Compute time length of frame at regulated speed
 *		Add frame to queue at appropriate point
 *		Adjust time length computation for followup frames
 *		Any frame that falls outside of its boundaries is freed
 *
 *	We work to the following constants
 *
 *		SHAPER_QLEN	Maximum queued frames
 *		SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency
 *				window drops the frame. This stops us queueing
 *				frames for a long time and confusing a remote
 *				host.
 *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *				That bounds the penalty we will inflict on low
 *				priority traffic.
 *		SHAPER_BURST	Time range we call "now" in order to reduce
 *				system load. The larger this is, the burstier
 *				the behaviour: you get better local performance
 *				through packet clustering on routers, but the
 *				remote end gets a worse estimate of round-trip
 *				times.
 *
 *	This is designed to handle lower speed links ( < 200Kbit/second or so).
 *	We typically run off a 100-150Hz base clock. This gives us a resolution
 *	at 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 *	resolution may start to cause much more burstiness in the traffic. We
 *	could avoid a lot of that by calling kick_shaper() at the end of the
 *	tied device transmissions. If you run above about 100Kbit/second you
 *	may need to tune the supposed speed rate for the right values.
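 *
 *	As a worked example of that resolution figure (illustrative only,
 *	and assuming HZ=100): at 200Kbit/second the shaper credits
 *	(200000/100)/8 = 250 bytes per clock tick, which is the roughly
 *	2Kbit/256 byte granularity quoted above.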
 *
 *	BUGS:
 *		Downing the interface underneath the shaper before downing the
 *		shaper itself will render your machine defunct, so for now
 *		don't shape over PPP or SLIP. This will be fixed in BETA4.
 *
 *	Update History :
 *
 *		bh_atomic() SMP race fixes; rewrote the locking code to
 *		be SMP safe and irq-mask friendly.
 *		NOTE: we can't use start_bh_atomic() in kick_shaper()
 *		because it is going to be called again from an irq handler,
 *		and synchronize_bh() is a no-no if called from irq context.
 *						1999  Andrea Arcangeli
 *
 *		Device statistics (tx_packets, tx_bytes,
 *		tx_dropped: queue_over_time and collisions: max_queue_exceeded)
 *						1999/06/18 Jordi Murgo <savage@apostols.org>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <net/dst.h>
#include <net/arp.h>
#include <linux/if_shaper.h>

int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
/*
 *	Locking
 */

static int shaper_lock(struct shaper *sh)
{
	/*
	 *	Lock in an interrupt must fail
	 */
	while (test_and_set_bit(0, &sh->locked))
	{
		if (!in_interrupt())
			sleep_on(&sh->wait_queue);
		else
			return 0;
	}
	return 1;
}

static void shaper_kick(struct shaper *sh);

static void shaper_unlock(struct shaper *sh)
{
	clear_bit(0, &sh->locked);
	wake_up(&sh->wait_queue);
	shaper_kick(sh);
}
/*
 *	Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
	int t=skb->len/shaper->bytespertick;
	return t;
}
/*
 *	Set the speed of a shaper. We compute this in bytes per tick since
 *	that's how the machine wants to run. Quoted input is in bits per second
 *	as is traditional (note: not baud). We assume 8-bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
	shaper->bitspersec=bitspersec;
	shaper->bytespertick=(bitspersec/HZ)/8;
	if(!shaper->bytespertick)
		shaper->bytespertick++;
}
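
/*
 *	Worked example of the two helpers above (figures are illustrative
 *	only, and assume HZ=100): shaper_setspeed(sh, 64000) yields
 *	bytespertick = (64000/100)/8 = 80, so shaper_clocks() charges a
 *	1500 byte frame 1500/80 = 18 ticks of shaper clock time.
 */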
/*
 *	Throw a frame at a shaper.
 */

static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *ptr;

	/*
	 *	Get ready to work on this shaper. Lock may fail if it's
	 *	an interrupt and locked.
	 */

	if(!shaper_lock(shaper))
		return -1;
	ptr=shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	skb->shapelatency=0;
	skb->shapeclock=shaper->recovery;
	if(time_before(skb->shapeclock, jiffies))
		skb->shapeclock=jiffies;
	skb->priority=0;	/* short term bug fix */
	skb->shapestamp=jiffies;

	/*
	 *	Time slots for this packet.
	 */

	skb->shapelen= shaper_clocks(shaper,skb);
#ifdef SHAPER_COMPLEX /* and broken.. */

	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
	{
		if(ptr->pri<skb->pri
			&& jiffies - ptr->shapeclock < SHAPER_MAXSLIP)
		{
			struct sk_buff *tmp=ptr->prev;

			/*
			 *	It goes before us therefore we slip the length
			 *	of the new frame.
			 */

			ptr->shapeclock+=skb->shapelen;
			ptr->shapelatency+=skb->shapelen;

			/*
			 *	The packet may have slipped so far back it
			 *	fell off.
			 */
			if(ptr->shapelatency > SHAPER_LATENCY)
			{
				skb_unlink(ptr);
				dev_kfree_skb(ptr);
			}
			ptr=tmp;
		}
		else
			break;
	}
	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
		skb_queue_head(&shaper->sendq,skb);
	else
	{
		struct sk_buff *tmp;
		/*
		 *	Set the packet clock out time according to the
		 *	frames ahead. I'm sure a bit of thought could drop
		 *	this loop.
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
			skb->shapeclock+=tmp->shapelen;
		skb_append(ptr,skb);
	}
#else
	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
			tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
			skb->shapeclock+=tmp->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if(skb->shapeclock-jiffies > SHAPER_LATENCY) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}
#endif
	if(sh_debug)
		printk("Frame queued.\n");
	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
	{
		ptr=skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_unlock(shaper);
	return 0;
}
/*
 *	Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
	if(sh_debug)
		printk("Kick frame on %p\n",newskb);
	if(newskb)
	{
		newskb->dev=shaper->dev;
		newskb->priority=2;
		if(sh_debug)
			printk("Kick new frame to %s, %d\n",
				shaper->dev->name,newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes+=newskb->len;
		shaper->stats.tx_packets++;

		if(sh_debug)
			printk("Kicked new frame out.\n");
		dev_kfree_skb(skb);
	}
}

/*
 *	Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *sh=(struct shaper *)data;
	shaper_kick(sh);
}
/*
 *	Kick a shaper queue and try and do something sensible with the
 *	queue.
 */

static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Shaper unlock will kick
	 */

	if (test_and_set_bit(0, &shaper->locked))
	{
		if(sh_debug)
			printk("Shaper locked.\n");
		mod_timer(&shaper->timer, jiffies);
		return;
	}

	/*
	 *	Walk the list (may be empty)
	 */

	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if(sh_debug)
			printk("Clock = %d, jiffies = %ld\n", skb->shapeclock, jiffies);
		if(time_before_eq(skb->shapeclock - jiffies, SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb);
			if (shaper->recovery < skb->shapeclock + skb->shapelen)
				shaper->recovery = skb->shapeclock + skb->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			skb->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 *	Next kick.
	 */

	if(skb!=NULL)
		mod_timer(&shaper->timer, skb->shapeclock);

	clear_bit(0, &shaper->locked);
}
/*
 *	Flush the shaper queues on a closedown
 */

static void shaper_flush(struct shaper *shaper)
{
	struct sk_buff *skb;
	if(!shaper_lock(shaper))
	{
		printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
		return;
	}
	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
		dev_kfree_skb(skb);
	shaper_unlock(shaper);
}

/*
 *	Bring the interface up. We just disallow this until a
 *	bind.
 */

static int shaper_open(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;

	/*
	 *	Can't open until attached.
	 *	Also can't open until speed is set, or we'll get
	 *	a division by zero.
	 */

	if(shaper->dev==NULL)
		return -ENODEV;
	if(shaper->bitspersec==0)
		return -EINVAL;
	MOD_INC_USE_COUNT;
	return 0;
}

/*
 *	Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;
	shaper_flush(shaper);
	start_bh_atomic();
	del_timer(&shaper->timer);
	end_bh_atomic();
	MOD_DEC_USE_COUNT;
	return 0;
}
/*
 *	Revectored calls. We alter the parameters and call the functions
 *	for our attached device. This enables us to bandwidth allocate after
 *	ARP and other resolutions and not before.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *sh=dev->priv;
	return shaper_qframe(sh, skb);
}

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
	struct shaper *sh=dev->priv;
	return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
	unsigned short type, void *daddr, void *saddr, unsigned len)
{
	struct shaper *sh=dev->priv;
	int v;
	if(sh_debug)
		printk("Shaper header\n");
	skb->dev=sh->dev;
	v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
	skb->dev=dev;
	return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
	struct shaper *sh=skb->dev->priv;
	struct net_device *dev=skb->dev;
	int v;
	if(sh_debug)
		printk("Shaper rebuild header\n");
	skb->dev=sh->dev;
	v=sh->rebuild_header(skb);
	skb->dev=dev;
	return v;
}

#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh=neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if(sh_debug)
		printk("Shaper header cache bind\n");
	tmp=neigh->dev;
	neigh->dev=sh->dev;
	ret=sh->hard_header_cache(neigh,hh);
	neigh->dev=tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
	unsigned char *haddr)
{
	struct shaper *sh=dev->priv;
	if(sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif

static int shaper_neigh_setup(struct neighbour *n)
{
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
	return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
	return 0;
}
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit=dev->hard_start_xmit;
	sh->get_stats=dev->get_stats;
	if(dev->hard_header)
	{
		sh->hard_header=dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if(dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if(dev->hard_header_cache)
	{
		sh->hard_header_cache	= dev->hard_header_cache;
		shdev->hard_header_cache= shaper_cache;
	}
	else
	{
		shdev->hard_header_cache= NULL;
	}

	if(dev->header_cache_update)
	{
		sh->header_cache_update	= dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update= NULL;
#else
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len=dev->hard_header_len;
	shdev->type=dev->type;
	shdev->addr_len=dev->addr_len;
	shdev->mtu=dev->mtu;
	sh->bitspersec=0;
	return 0;
}
static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_data;
	struct shaper *sh=dev->priv;
	switch(ss->ss_cmd)
	{
		case SHAPER_SET_DEV:
		{
			struct net_device *them=__dev_get_by_name(ss->ss_name);
			if(them==NULL)
				return -ENODEV;
			if(sh->dev)
				return -EBUSY;
			return shaper_attach(dev,dev->priv, them);
		}
		case SHAPER_GET_DEV:
			if(sh->dev==NULL)
				return -ENODEV;
			strcpy(ss->ss_name, sh->dev->name);
			return 0;
		case SHAPER_SET_SPEED:
			shaper_setspeed(sh,ss->ss_speed);
			return 0;
		case SHAPER_GET_SPEED:
			ss->ss_speed=sh->bitspersec;
			return 0;
		default:
			return -EINVAL;
	}
}
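
/*
 *	Illustrative sketch only (not part of this driver; the device names
 *	and rate are examples): a userspace configuration tool such as
 *	shapecfg is assumed to reach the handler above through the private
 *	device ioctl range, with the shaperconf block stored inline in the
 *	ifreq exactly as it is picked apart above:
 *
 *		struct ifreq ifr;
 *		struct shaperconf *sc=(struct shaperconf *)&ifr.ifr_data;
 *		int fd=socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		strcpy(ifr.ifr_name, "shaper0");
 *		sc->ss_cmd=SHAPER_SET_DEV;		(attach shaper0 to eth1)
 *		strcpy(sc->ss_name, "eth1");
 *		ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *		sc->ss_cmd=SHAPER_SET_SPEED;		(then shape to 64Kbit/sec)
 *		sc->ss_speed=64000;
 *		ioctl(fd, SIOCDEVPRIVATE, &ifr);
 */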
static struct shaper *shaper_alloc(struct net_device *dev)
{
	struct shaper *sh=kmalloc(sizeof(struct shaper), GFP_KERNEL);
	if(sh==NULL)
		return NULL;
	memset(sh,0,sizeof(*sh));
	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function=shaper_timer;
	sh->timer.data=(unsigned long)sh;
	init_waitqueue_head(&sh->wait_queue);
	return sh;
}

/*
 *	Add a shaper device to the system
 */

int __init shaper_probe(struct net_device *dev)
{
	/*
	 *	Set up the shaper.
	 */

	dev->priv = shaper_alloc(dev);
	if(dev->priv==NULL)
		return -ENOMEM;

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit	= shaper_start_xmit;
	dev->get_stats		= shaper_get_stats;
	dev->set_multicast_list = NULL;

	/*
	 *	Initialise the packet queues
	 */

	dev_init_buffers(dev);

	/*
	 *	Handlers for when we attach to a device.
	 */

	dev->hard_header	= shaper_header;
	dev->rebuild_header	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update= shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;

	/*
	 *	Shaper is ok
	 */

	return 0;
}
#ifdef MODULE

static char devicename[9];

static struct net_device dev_shape =
{
	devicename,
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, shaper_probe
};

int init_module(void)
{
	int err=dev_alloc_name(&dev_shape,"shaper%d");
	if(err<0)
		return err;
	printk(SHAPER_BANNER);
	if (register_netdev(&dev_shape) != 0)
		return -EIO;
	printk("Traffic shaper initialised.\n");
	return 0;
}

void cleanup_module(void)
{
	struct shaper *sh=dev_shape.priv;

	/*
	 *	No need to check MOD_IN_USE, as sys_delete_module() checks.
	 *	To be unloadable we must be closed and detached so we don't
	 *	need to flush things.
	 */

	unregister_netdev(&dev_shape);

	/*
	 *	Free up the private structure, or leak memory :-)
	 */

	kfree(sh);
	dev_shape.priv = NULL;
}

#else

static struct net_device dev_sh0 =
{
	"shaper0",
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, shaper_probe
};

static struct net_device dev_sh1 =
{
	"shaper1",
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, shaper_probe
};

static struct net_device dev_sh2 =
{
	"shaper2",
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, shaper_probe
};

static struct net_device dev_sh3 =
{
	"shaper3",
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, shaper_probe
};

void shaper_init(void)
{
	register_netdev(&dev_sh0);
	register_netdev(&dev_sh1);
	register_netdev(&dev_sh2);
	register_netdev(&dev_sh3);
}

#endif /* MODULE */