/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Phone: 1-703-847-0040 ext 103
 *
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 *
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well.  I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch,
 * the one with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 *
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock_bh(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			/* Drain one interval's worth of the slave's
			 * configured rate from its queued-byte count.
			 */
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			/* Slave went down; reap it. */
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	init_timer(&eql->timer);
	eql->timer.data		= (unsigned long) eql;
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function	= eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/* Now we undo some of the things that eth_setup does. */

	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;	/* Hands them off fast */
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
	       "your slave devices.\n", dev->name);

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES;	/* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 * The timer has to be stopped first before we start hacking away
	 * at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
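
/*
 * Illustrative userspace sketch (not part of the original driver) of how the
 * ioctls dispatched above are typically driven.  The device names "eql" and
 * "ppp0" and the 28800 bps rate are assumptions for the example; the types
 * and ioctl numbers come from <linux/if_eql.h>.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_eql.h>
 *
 *	int enslave_example(void)
 *	{
 *		slaving_request_t srq;
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);   // any socket will do
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		memset(&srq, 0, sizeof(srq));
 *		strncpy(ifr.ifr_name, "eql", IFNAMSIZ - 1);     // master device
 *		strncpy(srq.slave_name, "ppp0", IFNAMSIZ - 1);  // device to enslave
 *		srq.priority = 28800;                           // slave line speed, bits/sec
 *		ifr.ifr_data = (char *) &srq;
 *		return ioctl(fd, EQL_ENSLAVE, &ifr);            // lands in eql_enslave()
 *	}
 */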

/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}

	return best_slave;
}
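
/*
 * Worked example for the load formula above (illustrative figures, not from
 * the original source): on a 32-bit kernel ~0UL - (~0UL / 2) is 2^31, so a
 * 28800 bps slave (priority_Bps = 3600) with 900 bytes queued scores
 * 2^31 - 3600 + 900 * 8 = 2^31 + 3600, while an idle 57600 bps slave
 * (priority_Bps = 7200, nothing queued) scores 2^31 - 7200.  Lower is
 * better, so the faster, emptier link wins the next packet.
 */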

static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = 1;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return 0;
}

/*
 * Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	if (slave_dev) {
		if ((master_dev->flags & IFF_UP) == IFF_UP) {
			/* slave is not a master & not already a slave: */
			if (!eql_is_master(slave_dev) &&
			    !eql_is_slave(slave_dev)) {
				slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
				equalizer_t *eql = netdev_priv(master_dev);
				int ret;

				if (!s) {
					dev_put(slave_dev);
					return -ENOMEM;
				}

				memset(s, 0, sizeof(*s));
				s->dev = slave_dev;
				s->priority = srq.priority;
				s->priority_bps = srq.priority;
				s->priority_Bps = srq.priority / 8;

				spin_lock_bh(&eql->queue.lock);
				ret = __eql_insert_slave(&eql->queue, s);
				if (ret) {
					/* Queue is full; drop our reference
					 * and free the unused slave_t.
					 */
					dev_put(slave_dev);
					kfree(s);
				}
				spin_unlock_bh(&eql->queue.lock);

				return ret;
			}
		}

		dev_put(slave_dev);
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	ret = -EINVAL;
	if (slave_dev) {
		spin_lock_bh(&eql->queue.lock);

		if (eql_is_slave(slave_dev)) {
			slave_t *slave = __eql_find_slave_dev(&eql->queue,
							      slave_dev);
			if (slave) {
				eql_kill_one_slave(&eql->queue, slave);
				ret = 0;
			}
		}
		dev_put(slave_dev);

		spin_unlock_bh(&eql->queue.lock);
	}

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));	/* don't leak stack to userspace */

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	printk(version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");
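
/*
 * Usage note (illustrative, not from the original source): after
 * "modprobe eql" and bringing the master up with "ip link set eql up",
 * slaves are attached through the EQL_ENSLAVE ioctl sketched earlier;
 * the eql_enslave helper described in the kernel's networking
 * documentation wraps the same call.
 */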