/*
 *			Simple traffic shaper for Linux NET3.
 *
 *	(c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *				http://www.redhat.com
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *	warranty for any of this software. This material is provided
 *	"AS-IS" and at no charge.
 *
 *	Algorithm:
 *
 *	Queue Frame:
 *		Compute time length of frame at regulated speed
 *		Add frame to queue at appropriate point
 *		Adjust time length computation for follow-up frames
 *		Any frame that falls outside of its boundaries is freed
 *
 *	We work to the following constants
 *
 *		SHAPER_QLEN	Maximum queued frames
 *		SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency
 *				window drops the frame. This stops us queueing
 *				frames for a long time and confusing a remote
 *				host.
 *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *				That bounds the penalty we will inflict on low
 *				priority traffic.
 *		SHAPER_BURST	Time range we call "now" in order to reduce
 *				system load. The more we make this the burstier
 *				the behaviour, the better local performance you
 *				get through packet clustering on routers and the
 *				worse the remote end gets to judge RTTs.
 *
 *	This is designed to handle lower speed links ( < 200K/second or so). We
 *	run off a 100-150Hz base clock typically. This gives us a resolution at
 *	200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 *	resolution may start to cause much more burstiness in the traffic. We
 *	could avoid a lot of that by calling kick_shaper() at the end of the
 *	tied device transmissions. If you run above about 100K/second you
 *	may need to tune the supposed speed rate for the right values.
 *
 *	BUGS:
 *		Downing the interface under the shaper before the shaper
 *		will render your machine defunct. Don't for now shape over
 *		PPP or SLIP therefore!
 *
 *		This will be fixed in BETA4
 *
 * Update History :
 *
 *		bh_atomic() SMP races fixes and rewritten the locking code to
 *		be SMP safe and irq-mask friendly.
 *		NOTE: we can't use start_bh_atomic() in kick_shaper()
 *		because it's going to be recalled from an irq handler,
 *		and synchronize_bh() is a nono if called from irq context.
 *						1999  Andrea Arcangeli
 *
 *		Device statistics (tx_packets, tx_bytes,
 *		tx_drops: queue_over_time and collisions: max_queue_exceeded)
 *				1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *		Use skb->cb for private data.
 *				 2000/03 Andi Kleen
 */
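
/*
 * Rough worked example of the resolution figures above (an illustration
 * added here, assuming HZ=100; it is not part of the original text): at
 * 200 Kbit/second, shaper_setspeed() below computes
 *
 *	bytespertick = (200000 / 100) / 8 = 250 bytes per tick
 *
 * so a frame is accounted for in units of roughly 2 Kbit of link time,
 * which is the granularity quoted for the 100-150Hz base clock. At higher
 * rates each tick covers proportionally more data, hence the burstiness
 * warning above.
 */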
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>
#include <linux/jiffies.h>

#include <net/dst.h>
#include <net/arp.h>
struct shaper_cb {
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
};
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))

static int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

static void shaper_kick(struct shaper *sh);
/*
 *	Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
	int t=skb->len/shaper->bytespertick;
	return t;
}

/*
 *	Set the speed of a shaper. We compute this in bytes per tick since
 *	that's how the machine wants to run. Quoted input is in bits per second
 *	as is traditional (note not BAUD). We assume 8 bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
	shaper->bitspersec=bitspersec;
	shaper->bytespertick=(bitspersec/HZ)/8;
	if(!shaper->bytespertick)
		shaper->bytespertick++;
}
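
/*
 * Illustration only (not in the original source) of how the two helpers
 * above combine, assuming HZ=100 and a link shaped to 64 kbit/second:
 *
 *	shaper_setspeed(shaper, 64000);
 *		bytespertick = (64000 / 100) / 8 = 80
 *
 *	shaper_clocks() then charges a 1500 byte frame
 *		1500 / 80 = 18 ticks, i.e. about 180 ms of link time,
 *
 * which is the amount added to the shape clock of frames queued behind it
 * in shaper_start_xmit() below.
 */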
/*
 *	Throw a frame at a shaper.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	spin_lock(&shaper->lock);
	ptr=shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency=0;
	SHAPERCB(skb)->shapeclock=shaper->recovery;
	if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock=jiffies;
	skb->priority=0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp=jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
		    tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
			SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;

		/*
		 *	Queue over time. Spill packet.
		 */
		if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}

	if(sh_debug)
		printk("Frame queued.\n");
	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
	{
		ptr=skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
	return 0;
}
/*
 *	Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
	if(sh_debug)
		printk("Kick frame on %p\n",newskb);
	if(newskb)
	{
		newskb->dev=shaper->dev;
		newskb->priority=2;
		if(sh_debug)
			printk("Kick new frame to %s, %d\n",
				shaper->dev->name,newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes += skb->len;
		shaper->stats.tx_packets++;

		if(sh_debug)
			printk("Kicked new frame out.\n");
		dev_kfree_skb(skb);
	}
}
/*
 *	Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *shaper = (struct shaper *)data;

	spin_lock(&shaper->lock);
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
}
/*
 *	Kick a shaper queue and try and do something sensible with the
 *	queue.
 */

static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */
	while((skb=skb_peek(&shaper->sendq))!=NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */
		if(sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */
			skb_unlink(skb, &shaper->sendq);
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */
			SHAPERCB(skb)->shapepend=0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 *	Next kick.
	 */
	if(skb!=NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
/*
 *	Bring the interface up. We just disallow this until a
 *	bind.
 */

static int shaper_open(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;

	/*
	 *	Can't open until attached.
	 *	Also can't open until speed is set, or we'll get
	 *	a division by zero.
	 */
	if(shaper->dev==NULL)
		return -ENODEV;
	if(shaper->bitspersec==0)
		return -EINVAL;
	return 0;
}
/*
 *	Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
	struct shaper *shaper=dev->priv;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
		dev_kfree_skb(skb);

	spin_lock_bh(&shaper->lock);
	shaper_kick(shaper);
	spin_unlock_bh(&shaper->lock);

	del_timer_sync(&shaper->timer);
	return 0;
}
/*
 *	Revectored calls. We alter the parameters and call the functions
 *	for our attached device. This enables us to bandwidth allocate after
 *	ARP and other resolutions and not before.
 */

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
	struct shaper *sh=dev->priv;
	return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
	unsigned short type, void *daddr, void *saddr, unsigned len)
{
	struct shaper *sh=dev->priv;
	int v;
	if(sh_debug)
		printk("Shaper header\n");
	skb->dev=sh->dev;
	v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
	skb->dev=dev;
	return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
	struct shaper *sh=skb->dev->priv;
	struct net_device *dev=skb->dev;
	int v;
	if(sh_debug)
		printk("Shaper rebuild header\n");
	skb->dev=sh->dev;
	v=sh->rebuild_header(skb);
	skb->dev=dev;
	return v;
}
#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh=neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if(sh_debug)
		printk("Shaper header cache bind\n");
	tmp=neigh->dev;
	neigh->dev=sh->dev;
	ret=sh->hard_header_cache(neigh,hh);
	neigh->dev=tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
	unsigned char *haddr)
{
	struct shaper *sh=dev->priv;
	if(sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif
#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
#endif
	return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
#endif
	return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	return 0;
}

#endif
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit=dev->hard_start_xmit;
	sh->get_stats=dev->get_stats;
	if(dev->hard_header)
	{
		sh->hard_header=dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if(dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if(dev->hard_header_cache)
	{
		sh->hard_header_cache	= dev->hard_header_cache;
		shdev->hard_header_cache= shaper_cache;
	}
	else
	{
		shdev->hard_header_cache= NULL;
	}

	if(dev->header_cache_update)
	{
		sh->header_cache_update	= dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update= NULL;
#else
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len=dev->hard_header_len;
	shdev->type=dev->type;
	shdev->addr_len=dev->addr_len;
	shdev->mtu=dev->mtu;
	sh->bitspersec=0;
	return 0;
}
static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
	struct shaper *sh=dev->priv;

	if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
	{
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
	}

	switch(ss->ss_cmd)
	{
		case SHAPER_SET_DEV:
		{
			struct net_device *them=__dev_get_by_name(ss->ss_name);
			if(them==NULL)
				return -ENODEV;
			if(sh->dev)
				return -EBUSY;
			return shaper_attach(dev,dev->priv, them);
		}
		case SHAPER_GET_DEV:
			if(sh->dev==NULL)
				return -ENODEV;
			strcpy(ss->ss_name, sh->dev->name);
			return 0;
		case SHAPER_SET_SPEED:
			shaper_setspeed(sh,ss->ss_speed);
			return 0;
		case SHAPER_GET_SPEED:
			ss->ss_speed=sh->bitspersec;
			return 0;
		default:
			return -EINVAL;
	}
}
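
/*
 * Example of driving the ioctl interface above from userspace. This is only
 * an illustrative sketch, not part of the driver: it assumes the request
 * code is SIOCDEVPRIVATE (as used by the historical shapecfg utility) and
 * that <linux/if_shaper.h> is visible to the application.
 *
 *	struct ifreq ifr;
 *	struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_ifru;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "shaper0");
 *	ss->ss_cmd = SHAPER_SET_DEV;
 *	strcpy(ss->ss_name, "eth0");
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *	ss->ss_cmd = SHAPER_SET_SPEED;
 *	ss->ss_speed = 64000;
 *	ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 * The attach must come first: shaper_attach() resets bitspersec to zero,
 * and shaper_open() refuses to bring the interface up until both a target
 * device and a non-zero speed have been set.
 */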
static void shaper_init_priv(struct net_device *dev)
{
	struct shaper *sh = dev->priv;

	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function=shaper_timer;
	sh->timer.data=(unsigned long)sh;
	spin_lock_init(&sh->lock);
}
/*
 *	Add a shaper device to the system
 */

static void __init shaper_setup(struct net_device *dev)
{
	/*
	 *	Set up the shaper.
	 */

	SET_MODULE_OWNER(dev);

	shaper_init_priv(dev);

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit	= shaper_start_xmit;
	dev->get_stats		= shaper_get_stats;
	dev->set_multicast_list = NULL;

	/*
	 *	Initialise the packet queues
	 */

	/*
	 *	Handlers for when we attach to a device.
	 */

	dev->hard_header	= shaper_header;
	dev->rebuild_header	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update= shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;
}
static int shapers = 1;

#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
{
	int i;
	size_t alloc_size;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (shapers < 1)
		return -ENODEV;

	alloc_size = sizeof(*dev) * shapers;
	devs = kmalloc(alloc_size, GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	memset(devs, 0, alloc_size);

	for (i = 0; i < shapers; i++) {

		snprintf(name, IFNAMSIZ, "shaper%d", i);
		dev = alloc_netdev(sizeof(struct shaper), name,
				   shaper_setup);
		if (!dev)
			break;

		if (register_netdev(dev)) {
			free_netdev(dev);
			break;
		}

		devs[i] = dev;
		shapers_registered++;
	}

	if (!shapers_registered) {
		kfree(devs);
		devs = NULL;
	}

	return (shapers_registered ? 0 : -ENODEV);
}
static void __exit shaper_exit (void)
{
	int i;

	for (i = 0; i < shapers_registered; i++) {
		if (devs[i]) {
			unregister_netdev(devs[i]);
			free_netdev(devs[i]);
		}
	}

	kfree(devs);
	devs = NULL;
}
module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");