[NET]: Add some sparse annotations to network driver stack.
[linux-2.6/history.git] net/core/dev.c
/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif	/* CONFIG_NET_RADIO */
#include <asm/current.h>
/* This define, if set, will randomly drop a packet when congestion
 * is more than moderate.  It helps fairness in the multi-interface
 * case when one of them is a hog, but it kills performance for the
 * single interface case so it is off now by default.
 */
#undef RAND_LIE

/* Setting this will sample the queue lengths and thus congestion
 * via a timer instead of as each packet is received.
 */
#undef OFFLINE_SAMPLE

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy);
static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
#endif

/*
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
struct net_device *dev_base;
struct net_device **dev_tail = &dev_base;
rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

/*
 *	Our notifier list
 */
static struct notifier_block *netdev_chain;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };

#ifdef CONFIG_NET_FASTROUTE
int netdev_fastroute;
int netdev_fastroute_obstacles;
#endif

extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern int netdev_unregister_sysfs(struct net_device *);

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	For efficiency
 */
int netdev_nit;

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	and subsequent readers would see a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
#ifdef CONFIG_NET_FASTROUTE
	if (pt->af_packet_priv) {
		netdev_fastroute_obstacles++;
		dev_clear_fastroute(pt->dev);
	}
#endif
	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit++;
		list_add_rcu(&pt->list, &ptype_all);
	} else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
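
/*
 * Example usage (illustrative sketch only; "my_tap" and "my_tap_rcv" are
 * hypothetical names, not part of this file):
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt)
 *	{
 *		// inspect skb; the handler owns this reference
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap = {
 *		.type = __constant_htons(ETH_P_ALL),	// a tap: sees all protocols
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 */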
extern void linkwatch_run_queue(void);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit--;
		head = &ptype_all;
	} else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
#ifdef CONFIG_NET_FASTROUTE
			if (pt->af_packet_priv)
				netdev_fastroute_obstacles--;
#endif
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
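
/*
 * Teardown sketch (hypothetical module exit path; pairs with the
 * dev_add_pack() example above). dev_remove_pack() may sleep, so call it
 * from process context:
 *
 *	dev_remove_pack(&my_tap);	// sleeps via synchronize_net()
 *	// no CPU can still be inside my_tap_rcv(); state may now be freed
 */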
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
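
/*
 * Example kernel command line (values are made up): with
 * "netdev=5,0x340,0,0,eth0" the parser above stores irq=5 and
 * base_addr=0x340 for the device that will later probe as "eth0";
 * netdev_boot_setup_check() then applies them during device probing.
 */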
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
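
/*
 * Typical caller pattern (sketch; "eth0" is just an example name):
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *	if (dev) {
 *		// ... use dev; the held reference keeps it from being freed
 *		dev_put(dev);	// drop the reference taken above
 *	}
 */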
/*
   Return value is changed to int to prevent illegal usage in future.
   It is still legal to use to check for device existence.

   User should understand, that the result returned by this function
   is meaningless, if it was not issued under rtnl semaphore.
 */

/**
 *	dev_get - test if a device exists
 *	@name:	name to test for
 *
 *	Test if a name exists. Returns true if the name is found. In order
 *	to be sure the name is not allocated or removed during the test the
 *	caller must hold the rtnl semaphore.
 *
 *	This function exists only for back compatibility with older
 *	drivers.
 */
int __dev_get(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
	return dev != NULL;
}

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			break;
	return dev;
}

struct net_device *__dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type)
			break;
	return dev;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_flags(if_flags, mask);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The caller must hold either
 *	the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0)
			return dev;
	}
	return NULL;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
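
/*
 * Example (sketch; only the return-value handling is shown): a driver that
 * wants the next free ethN slot would do
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	// e.g. -EINVAL for a bad format, -ENFILE if full
 *	// dev->name now holds something like "eth2"; err is the unit number
 */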
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strchr(newname, '%')) {
		int err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));

	class_device_rename(&dev->class_dev, dev->name);
	notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	return 0;
}

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}


/*
 * Some old buggy device drivers change get_stats after registering
 * the device.  Try and trap them here.
 * This can be eliminated when all devices are known fixed.
 */
static inline int get_stats_changed(struct net_device *dev)
{
	int changed = dev->last_stats != dev->get_stats;
	dev->last_stats = dev->get_stats;
	return changed;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Check for broken device drivers.
	 */
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats after register\n",
		       dev->name);
	}

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	Check for more broken device drivers.
	 */
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats in open\n",
		       dev->name);
	}

	/*
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}
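
/*
 * Caller sketch (hypothetical; dev obtained elsewhere): dev_open() is
 * typically reached via dev_change_flags() with the rtnl semaphore held
 * by the ioctl path, e.g.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// nop, returning 0, if already IFF_UP
 *	rtnl_unlock();
 */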
#ifdef CONFIG_NET_FASTROUTE

static void dev_do_clear_fastroute(struct net_device *dev)
{
	if (dev->accept_fastpath) {
		int i;

		for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
			struct dst_entry *dst;

			write_lock_irq(&dev->fastpath_lock);
			dst = dev->fastpath[i];
			dev->fastpath[i] = NULL;
			write_unlock_irq(&dev->fastpath_lock);

			dst_release(dst);
		}
	}
}

void dev_clear_fastroute(struct net_device *dev)
{
	if (dev) {
		dev_do_clear_fastroute(dev);
	} else {
		read_lock(&dev_base_lock);
		for (dev = dev_base; dev; dev = dev->next)
			dev_do_clear_fastroute(dev);
		read_unlock(&dev_base_lock);
	}
}
#endif

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait when poll really will happen. Actually, the best place
	 * for this is inside dev->stop() after device stopped its irq
	 * engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;
#ifdef CONFIG_NET_FASTROUTE
	dev_clear_fastroute(dev);
#endif

	/*
	 *	Tell people we are down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}


/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give the caller a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	err = notifier_chain_register(&netdev_chain, nb);
	if (!err) {
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
	rtnl_unlock();
	return err;
}
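
/*
 * Registration sketch (hypothetical handler; the names are illustrative):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);	// replays REGISTER/UP events
 */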
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return notifier_call_chain(&netdev_chain, val, v);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	net_timestamp(&skb->stamp);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype);
		}
	}
	rcu_read_unlock();
}

/* Calculate the csum in the case when the packet is misrouted.
 * If it fails for some reason, ignore and send the skb with a wrong
 * checksum.
 */
struct sk_buff *skb_checksum_help(struct sk_buff *skb)
{
	unsigned int csum;
	int offset = skb->h.raw - skb->data;

	if (offset > (int)skb->len)
		BUG();
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

	offset = skb->tail - skb->h.raw;
	if (offset <= 0)
		BUG();
	if (skb->csum + 2 > offset)
		BUG();

	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
	return skb;
}

#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
			return 1;

	return 0;
}
#else
#define illegal_highdma(dev, skb)	(0)
#endif

extern void skb_release_data(struct sk_buff *);

/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, int gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	struct skb_shared_info *ninfo;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = skb->end - skb->head + expand;
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
		BUG();

	/* Set up shinfo */
	ninfo = (struct skb_shared_info*)(data + size);
	atomic_set(&ninfo->dataref, 1);
	ninfo->tso_size = skb_shinfo(skb)->tso_size;
	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
	ninfo->nr_frags = 0;
	ninfo->frag_list = NULL;

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw   += offset;
	skb->nh.raw  += offset;
	skb->mac.raw += offset;
	skb->tail    += offset;
	skb->data    += offset;

	/* We are no longer a clone, even if we were. */
	skb->cloned = 0;

	skb->tail += skb->data_len;
	skb->data_len = 0;
	return 0;
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP)))) {
		if ((skb = skb_checksum_help(skb)) == NULL)
			goto out;
	}

	/* Grab device queue */
	spin_lock_bh(&dev->queue_lock);
	q = dev->qdisc;
	if (q->enqueue) {
		rc = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock_bh(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Either take the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id();

		if (dev->xmit_lock_owner != cpu) {
			/*
			 * The spin_lock effectively does a preempt lock, but
			 * we are about to drop that...
			 */
			preempt_disable();
			spin_unlock(&dev->queue_lock);
			spin_lock(&dev->xmit_lock);
			dev->xmit_lock_owner = cpu;
			preempt_enable();

			if (!netif_queue_stopped(dev)) {
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				rc = 0;
				if (!dev->hard_start_xmit(skb, dev)) {
					dev->xmit_lock_owner = -1;
					spin_unlock_bh(&dev->xmit_lock);
					goto out;
				}
			}
			dev->xmit_lock_owner = -1;
			spin_unlock_bh(&dev->xmit_lock);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
			goto out_enetdown;
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}
	spin_unlock_bh(&dev->queue_lock);
out_enetdown:
	rc = -ENETDOWN;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
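
/*
 * Caller sketch (hypothetical; assumes an already-built frame):
 *
 *	skb->dev = dev;			// pick the outgoing device
 *	skb->protocol = htons(ETH_P_IP);
 *	rc = dev_queue_xmit(skb);	// consumes the skb in all cases
 *	if (rc < 0)
 *		...			// e.g. -ENETDOWN; do not touch skb again
 */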
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 300;
int weight_p = 64;            /* old backlog weight */
/* These numbers are selected based on intuition and some
 * experimentation; if you have a more scientific way of doing this
 * please go ahead and fix things.
 */
int no_cong_thresh = 10;
int no_cong = 20;
int lo_cong = 100;
int mod_cong = 290;

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct
{
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];

int netdev_register_fc(struct net_device *dev,
		       void (*stimul)(struct net_device *dev))
{
	int bit = 0;
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
	return bit;
}

void netdev_unregister_fc(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (bit > 0) {
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
}

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1 << i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif

static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	int blog = sd->input_pkt_queue.qlen;
	int avg_blog = sd->avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		sd->cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		sd->cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_CN_HIGH;
#endif
	} else if (avg_blog > no_cong)
		sd->cng_level = NET_RX_CN_LOW;
	else  /* no congestion */
		sd->cng_level = NET_RX_SUCCESS;

	sd->avg_blog = avg_blog;
}

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
{
	/* 10 ms or 1 ms -- i don't care -- JHS */
	int next_tick = 1;
	int cpu = smp_processor_id();

	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
}
#endif


/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int this_cpu;
	struct softnet_data *queue;
	unsigned long flags;

#ifdef CONFIG_NETPOLL_RX
	if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	this_cpu = smp_processor_id();
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
				goto drop;

enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
#endif
			local_irq_restore(flags);
			return queue->cng_level;
		}

		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
#endif
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	if (!queue->throttle) {
		queue->throttle = 1;
		__get_cpu_var(netdev_rx_stat).throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
#endif
	}

drop:
	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
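
/*
 * Driver RX sketch (hypothetical interrupt handler; my_dev, my_len and
 * my_hw_buffer are illustrative): a non-NAPI driver hands each received
 * frame to netif_rx().
 *
 *	skb = dev_alloc_skb(my_len + 2);
 *	if (!skb)
 *		return;			// drop: out of memory
 *	skb_reserve(skb, 2);		// align the IP header
 *	memcpy(skb_put(skb, my_len), my_hw_buffer, my_len);
 *	skb->dev = my_dev;
 *	skb->protocol = eth_type_trans(skb, my_dev);
 *	netif_rx(skb);			// queue for softirq processing
 */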
static __inline__ void skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		skb->real_dev = skb->dev;
		skb->dev = dev->master;
	}
}

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev, int last)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev);
}


#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb);

static __inline__ int handle_bridge(struct sk_buff *skb,
				    struct packet_type *pt_prev)
{
	int ret = NET_RX_DROP;
	if (pt_prev)
		ret = deliver_skb(skb, pt_prev, 0);

	return ret;
}

#endif

static inline int __handle_bridge(struct sk_buff *skb,
				  struct packet_type **pt_prev, int *ret)
{
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	if (skb->dev->br_port && skb->pkt_type != PACKET_LOOPBACK) {
		*ret = handle_bridge(skb, *pt_prev);
		if (br_handle_frame_hook(skb) == 0)
			return 1;

		*pt_prev = NULL;
	}
#endif
	return 0;
}

int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	int ret = NET_RX_DROP;
	unsigned short type;

#ifdef CONFIG_NETPOLL_RX
	if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	skb_bond(skb);

	__get_cpu_var(netdev_rx_stat).total++;

#ifdef CONFIG_NET_FASTROUTE
	if (skb->pkt_type == PACKET_FASTROUTE) {
		__get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
		return dev_queue_xmit(skb);
	}
#endif

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, 0);
			pt_prev = ptype;
		}
	}

	handle_diverter(skb);

	if (__handle_bridge(skb, &pt_prev, &ret))
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, 0);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
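
/*
 * NAPI sketch (hypothetical ->poll() method; my_rx_ring_next() and
 * my_rx_ring_empty() are illustrative helpers): drivers with a poll
 * handler deliver frames directly via netif_receive_skb() instead of
 * netif_rx(), mirroring process_backlog() below.
 *
 *	static int my_poll(struct net_device *dev, int *budget)
 *	{
 *		int work = 0;
 *		struct sk_buff *skb;
 *
 *		while (work < dev->quota && (skb = my_rx_ring_next(dev))) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		dev->quota -= work;
 *		*budget -= work;
 *		if (my_rx_ring_empty(dev)) {
 *			netif_rx_complete(dev);	// re-enable rx interrupts
 *			return 0;
 *		}
 *		return 1;			// more work pending
 *	}
 */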
static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;

#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (queue->throttle &&
		    queue->input_pkt_queue.qlen < no_cong_thresh ) {
			queue->throttle = 0;
			if (atomic_dec_and_test(&netdev_dropping)) {
				netdev_wakeup();
				break;
			}
		}
#endif
	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();
	return 0;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_max_backlog;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
1875 static gifconf_func_t * gifconf_list [NPROTO];
1878 * register_gifconf - register a SIOCGIF handler
1879 * @family: Address family
1880 * @gifconf: Function handler
1882 * Register protocol dependent address dumping routines. The handler
1883 * that is passed must not be freed or reused until it has been replaced
1884 * by another handler.
1886 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1888 if (family >= NPROTO)
1889 return -EINVAL;
1890 gifconf_list[family] = gifconf;
1891 return 0;
1896 * Map an interface index to its name (SIOCGIFNAME)
1900 * We need this ioctl for efficient implementation of the
1901 * if_indextoname() function required by the IPv6 API. Without
1902 * it, we would have to search all the interfaces to find a
1903 * match. --pb
1906 static int dev_ifname(struct ifreq __user *arg)
1908 struct net_device *dev;
1909 struct ifreq ifr;
1912 * Fetch the caller's info block.
1915 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1916 return -EFAULT;
1918 read_lock(&dev_base_lock);
1919 dev = __dev_get_by_index(ifr.ifr_ifindex);
1920 if (!dev) {
1921 read_unlock(&dev_base_lock);
1922 return -ENODEV;
1925 strcpy(ifr.ifr_name, dev->name);
1926 read_unlock(&dev_base_lock);
1928 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1929 return -EFAULT;
1930 return 0;
1934 * Perform a SIOCGIFCONF call. This structure will change
1935 * size eventually, and there is nothing I can do about it.
1936 * Thus we will need a 'compatibility mode'.
1939 static int dev_ifconf(char __user *arg)
1941 struct ifconf ifc;
1942 struct net_device *dev;
1943 char *pos;
1944 int len;
1945 int total;
1946 int i;
1949 * Fetch the caller's info block.
1952 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
1953 return -EFAULT;
1955 pos = ifc.ifc_buf;
1956 len = ifc.ifc_len;
1959 * Loop over the interfaces, and write an info block for each.
1962 total = 0;
1963 for (dev = dev_base; dev; dev = dev->next) {
1964 for (i = 0; i < NPROTO; i++) {
1965 if (gifconf_list[i]) {
1966 int done;
1967 if (!pos)
1968 done = gifconf_list[i](dev, NULL, 0);
1969 else
1970 done = gifconf_list[i](dev, pos + total,
1971 len - total);
1972 if (done < 0)
1973 return -EFAULT;
1974 total += done;
1980 * All done. Write the updated control block back to the caller.
1982 ifc.ifc_len = total;
1985 * Both BSD and Solaris return 0 here, so we do too.
1987 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
1990 #ifdef CONFIG_PROC_FS
1992 * This is invoked by the /proc filesystem handler to display a device
1993 * in detail.
1995 static __inline__ struct net_device *dev_get_idx(loff_t pos)
1997 struct net_device *dev;
1998 loff_t i;
2000 for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
2002 return i == pos ? dev : NULL;
2005 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2007 read_lock(&dev_base_lock);
2008 return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
2011 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2013 ++*pos;
2014 return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
2017 void dev_seq_stop(struct seq_file *seq, void *v)
2019 read_unlock(&dev_base_lock);
2022 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2024 if (dev->get_stats) {
2025 struct net_device_stats *stats = dev->get_stats(dev);
2027 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2028 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2029 dev->name, stats->rx_bytes, stats->rx_packets,
2030 stats->rx_errors,
2031 stats->rx_dropped + stats->rx_missed_errors,
2032 stats->rx_fifo_errors,
2033 stats->rx_length_errors + stats->rx_over_errors +
2034 stats->rx_crc_errors + stats->rx_frame_errors,
2035 stats->rx_compressed, stats->multicast,
2036 stats->tx_bytes, stats->tx_packets,
2037 stats->tx_errors, stats->tx_dropped,
2038 stats->tx_fifo_errors, stats->collisions,
2039 stats->tx_carrier_errors +
2040 stats->tx_aborted_errors +
2041 stats->tx_window_errors +
2042 stats->tx_heartbeat_errors,
2043 stats->tx_compressed);
2044 } else
2045 seq_printf(seq, "%6s: No statistics available.\n", dev->name);
2049 * Called from the PROCfs module. This now uses the new arbitrary sized
2050 * /proc/net interface to create /proc/net/dev
2052 static int dev_seq_show(struct seq_file *seq, void *v)
2054 if (v == SEQ_START_TOKEN)
2055 seq_puts(seq, "Inter-| Receive "
2056 " | Transmit\n"
2057 " face |bytes packets errs drop fifo frame "
2058 "compressed multicast|bytes packets errs "
2059 "drop fifo colls carrier compressed\n");
2060 else
2061 dev_seq_printf_stats(seq, v);
2062 return 0;
2065 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2067 struct netif_rx_stats *rc = NULL;
2069 while (*pos < NR_CPUS)
2070 if (cpu_online(*pos)) {
2071 rc = &per_cpu(netdev_rx_stat, *pos);
2072 break;
2073 } else
2074 ++*pos;
2075 return rc;
2078 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2080 return softnet_get_online(pos);
2083 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2085 ++*pos;
2086 return softnet_get_online(pos);
2089 static void softnet_seq_stop(struct seq_file *seq, void *v)
2093 static int softnet_seq_show(struct seq_file *seq, void *v)
2095 struct netif_rx_stats *s = v;
2097 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2098 s->total, s->dropped, s->time_squeeze, s->throttled,
2099 s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
2100 s->fastroute_deferred_out,
2101 #if 0
2102 s->fastroute_latency_reduction
2103 #else
2104 s->cpu_collision
2105 #endif
2107 return 0;
2110 static struct seq_operations dev_seq_ops = {
2111 .start = dev_seq_start,
2112 .next = dev_seq_next,
2113 .stop = dev_seq_stop,
2114 .show = dev_seq_show,
2117 static int dev_seq_open(struct inode *inode, struct file *file)
2119 return seq_open(file, &dev_seq_ops);
2122 static struct file_operations dev_seq_fops = {
2123 .owner = THIS_MODULE,
2124 .open = dev_seq_open,
2125 .read = seq_read,
2126 .llseek = seq_lseek,
2127 .release = seq_release,
2130 static struct seq_operations softnet_seq_ops = {
2131 .start = softnet_seq_start,
2132 .next = softnet_seq_next,
2133 .stop = softnet_seq_stop,
2134 .show = softnet_seq_show,
2137 static int softnet_seq_open(struct inode *inode, struct file *file)
2139 return seq_open(file, &softnet_seq_ops);
2142 static struct file_operations softnet_seq_fops = {
2143 .owner = THIS_MODULE,
2144 .open = softnet_seq_open,
2145 .read = seq_read,
2146 .llseek = seq_lseek,
2147 .release = seq_release,
2150 #ifdef WIRELESS_EXT
2151 extern int wireless_proc_init(void);
2152 #else
2153 #define wireless_proc_init() 0
2154 #endif
2156 static int __init dev_proc_init(void)
2158 int rc = -ENOMEM;
2160 if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2161 goto out;
2162 if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2163 goto out_dev;
2164 if (wireless_proc_init())
2165 goto out_softnet;
2166 rc = 0;
2167 out:
2168 return rc;
2169 out_softnet:
2170 proc_net_remove("softnet_stat");
2171 out_dev:
2172 proc_net_remove("dev");
2173 goto out;
2175 #else
2176 #define dev_proc_init() 0
2177 #endif /* CONFIG_PROC_FS */

/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
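
/* Illustrative sketch (not part of the original file): a bonding-style
 * driver would enslave and later release a device under the RTNL
 * semaphore; bond_dev and slave_dev are hypothetical names.
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	...
 *	err = netdev_set_master(slave_dev, NULL);
 *	rtnl_unlock();
 */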

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_PROMISC;
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	if (dev->flags ^ old_flags) {
#ifdef CONFIG_NET_FASTROUTE
		if (dev->flags & IFF_PROMISC) {
			netdev_fastroute_obstacles++;
			dev_clear_fastroute(dev);
		} else
			netdev_fastroute_obstacles--;
#endif
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
								"left");
	}
}
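
/* Illustrative sketch (not part of the original file): a feature that
 * needs to see all traffic takes a promiscuity reference and drops it
 * when done; the counter lets independent users overlap safely.
 *
 *	dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 */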

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface keeps listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 */
void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	if (dev->flags ^ old_flags)
		dev_mc_upload(dev);
}
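
/* Illustrative sketch (not part of the original file): the same counting
 * discipline as dev_set_promiscuity, e.g. for the lifetime of a
 * multicast routing service.
 *
 *	dev_set_allmulti(dev, 1);
 *	...
 *	dev_set_allmulti(dev, -1);
 */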

unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev) && netif_carrier_ok(dev))
		flags |= IFF_RUNNING;

	return flags;
}

int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret;
	int old_flags = dev->flags;

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	dev_mc_upload(dev);

	/*
	 *	Have we downed the interface? We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_mc_upload(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC when
	   IFF_ALLMULTI is requested, without asking us and without
	   reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);

	return ret;
}
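
/* Illustrative sketch (not part of the original file): bringing an
 * interface up programmatically goes through dev_change_flags() under
 * the RTNL semaphore, so IFF_UP handling and notifications stay
 * consistent.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */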

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must not be negative.	*/
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		notifier_call_chain(&netdev_chain,
				    NETDEV_CHANGEMTU, dev);
	return err;
}
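
/* Illustrative sketch (not part of the original file): callers hold the
 * RTNL semaphore and let the device veto the change via its change_mtu
 * hook; 1500 is just an example value.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 1500);
 *	rtnl_unlock();
 */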

/*
 *	Perform the SIOCxIFxxx calls.
 */
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
			return 0;

		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
			return -EOPNOTSUPP;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);

		case SIOCGIFHWADDR:
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			return 0;

		case SIOCSIFHWADDR:
			if (!dev->set_mac_address)
				return -EOPNOTSUPP;
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
			if (!err)
				notifier_call_chain(&netdev_chain,
						    NETDEV_CHANGEADDR, dev);
			return err;

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			notifier_call_chain(&netdev_chain,
					    NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev, &ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
					  dev->addr_len, 1);

		case SIOCDELMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
					     dev->addr_len, 1);

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen < 0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);

		/*
		 *	Unknown or private ioctl
		 */

		default:
			if ((cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCWANDEV) {
				err = -EOPNOTSUPP;
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
								    cmd);
					else
						err = -ENODEV;
				}
			} else
				err = -EINVAL;

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The
 *	actual 'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_shlock();
		ret = dev_ifconf((char __user *) arg);
		rtnl_shunlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname((struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc(&ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		case SIOCETHTOOL:
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ethtool(&ifr);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- return a value
		 */
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSMIIREG:
		case SIOCBONDENSLAVE:
		case SIOCBONDRELEASE:
		case SIOCBONDSETHWADDR:
		case SIOCBONDSLAVEINFOQUERY:
		case SIOCBONDINFOQUERY:
		case SIOCBONDCHANGEACTIVE:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but
			 * currently do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space.
			 * Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */
		default:
			if (cmd == SIOCWANDEV ||
			    (cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15)) {
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				if (!ret && copy_to_user(arg, &ifr,
							 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#ifdef WIRELESS_EXT
			/* Take care of Wireless Extensions */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
				/* If command is `set a parameter', or
				 * `get the encoding parameters', check if
				 * the user has the right to do it */
				if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
					if (!capable(CAP_NET_ADMIN))
						return -EPERM;
				}
				dev_load(ifr.ifr_name);
				rtnl_lock();
				/* Follow me in net/core/wireless.c */
				ret = wireless_process_ioctl(&ifr, cmd);
				rtnl_unlock();
				if (!ret && IW_IS_GET(cmd) &&
				    copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#endif	/* WIRELESS_EXT */
			return -EINVAL;
	}
}
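
/* Illustrative sketch (not part of the original file): the usual way
 * into dev_ioctl() is a socket ioctl from userspace, e.g. reading
 * interface flags; "eth0" is just an example name.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
 *		printf("eth0 flags: 0x%x\n", ifr.ifr_flags);
 */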

/**
 *	dev_new_index	-	allocate an ifindex
 *
 *	Returns a suitable unique value for a new device interface
 *	number. The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
int dev_new_index(void)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(ifindex))
			return ifindex;
	}
}

static int dev_boot_phase = 1;

/* Delayed registration/unregistration */
static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);

static inline void net_set_todo(struct net_device *dev)
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. See the comment at the
 *	end of Space.c for details about the locking. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);

	spin_lock_init(&dev->queue_lock);
	spin_lock_init(&dev->xmit_lock);
	dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_FASTROUTE
	dev->fastpath_lock = RW_LOCK_UNLOCKED;
#endif

	ret = alloc_divert_blk(dev);
	if (ret)
		goto out;

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out_err;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto out_err;
	}

	dev->ifindex = dev_new_index();
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto out_err;
		}
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
	    !(dev->features & (NETIF_F_IP_CSUM |
			       NETIF_F_NO_CSUM |
			       NETIF_F_HW_CSUM))) {
		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/*
	 *	A nil rebuild_header routine should never be called;
	 *	install a default that serves purely as a bug trap.
	 */

	if (!dev->rebuild_header)
		dev->rebuild_header = default_rebuild_header;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev->next = NULL;
	dev_init_scheduler(dev);
	write_lock_bh(&dev_base_lock);
	*dev_tail = dev;
	dev_tail = &dev->next;
	hlist_add_head(&dev->name_hlist, head);
	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
	dev_hold(dev);
	dev->reg_state = NETREG_REGISTERING;
	write_unlock_bh(&dev_base_lock);

	/* Notify protocols, that a new device appeared. */
	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);

	/* Finish registration after unlock */
	net_set_todo(dev);
	ret = 0;

out:
	return ret;
out_err:
	free_divert_blk(dev);
	goto out;
}
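
/* Illustrative sketch (not part of the original file): a driver that
 * already needs the RTNL semaphore registers a freshly allocated device
 * and frees it on failure; struct mydrv_priv is hypothetical.
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 *	if (err)
 *		free_netdev(dev);
 */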

/**
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_shlock();
			rtnl_exlock();

			/* Rebroadcast unregister notification */
			notifier_call_chain(&netdev_chain,
					    NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			rtnl_exunlock();
			rtnl_shunlock();

			rebroadcast_time = jiffies;
		}

		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ / 4);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
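
/* Illustrative sketch (not part of the original file): a subsystem
 * holding a long-lived device reference should drop it from its
 * netdevice notifier so netdev_wait_allrefs() can finish; the mydrv_*
 * names are hypothetical.
 *
 *	static int mydrv_netdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == mydrv_dev) {
 *			mydrv_dev = NULL;
 *			dev_put(dev);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */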

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can create/delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
static DECLARE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
	struct list_head list = LIST_HEAD_INIT(list);

	/* Safe outside mutex since we only care about entries that
	 * this cpu put into queue while under RTNL.
	 */
	if (list_empty(&net_todo_list))
		return;

	/* Need to guard against multiple CPUs getting out of order. */
	down(&net_todo_run_mutex);

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_splice_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		switch(dev->reg_state) {
		case NETREG_REGISTERING:
			netdev_register_sysfs(dev);
			dev->reg_state = NETREG_REGISTERED;
			break;

		case NETREG_UNREGISTERING:
			netdev_unregister_sysfs(dev);
			dev->reg_state = NETREG_UNREGISTERED;

			netdev_wait_allrefs(dev);

			/* paranoia */
			BUG_ON(atomic_read(&dev->refcnt));
			BUG_TRAP(!dev->ip_ptr);
			BUG_TRAP(!dev->ip6_ptr);
			BUG_TRAP(!dev->dn_ptr);

			/* It must be the very last action,
			 * after this 'dev' may point to freed up memory.
			 */
			if (dev->destructor)
				dev->destructor(dev);
			break;

		default:
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			break;
		}
	}

	up(&net_todo_run_mutex);
}

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via class release */
	class_device_put(&dev->class_dev);
}
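
/* Illustrative sketch (not part of the original file): the
 * NETREG_UNINITIALIZED branch above is what keeps a driver's error path
 * simple; struct mydrv_priv is hypothetical.
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);	(safe even though never registered)
 *		return err;
 *	}
 */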

/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_kernel();
}

/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore. See the comment at the
 *	end of Space.c for details about the locking. You may want
 *	unregister_netdev() instead of this.
 */

int unregister_netdevice(struct net_device *dev)
{
	struct net_device *d, **dp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call this without ever having registered, to
	 * unwind a failed initialization. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);
		return -ENODEV;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	if (dev->flags & IFF_UP)
		dev_close(dev);

	/* And unlink it from device chain. */
	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
		if (d == dev) {
			write_lock_bh(&dev_base_lock);
			hlist_del(&dev->name_hlist);
			hlist_del(&dev->index_hlist);
			if (dev_tail == &dev->next)
				dev_tail = dp;
			*dp = d->next;
			write_unlock_bh(&dev_base_lock);
			break;
		}
	}
	if (!d) {
		printk(KERN_ERR "unregister net_device: '%s' not found\n",
		       dev->name);
		return -ENODEV;
	}

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

#ifdef CONFIG_NET_FASTROUTE
	dev_clear_fastroute(dev);
#endif

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the multicast chain
	 */
	dev_mc_discard(dev);

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(!dev->master);

	free_divert_blk(dev);

	/* Finish processing unregister after unlock */
	net_set_todo(dev);

	dev_put(dev);
	return 0;
}
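
/* Illustrative sketch (not part of the original file): the matching
 * teardown for the sequence documented above netdev_run_todo();
 * free_netdev() must only run after rtnl_unlock() has processed the
 * todo list.
 *
 *	rtnl_lock();
 *	unregister_netdevice(dev);
 *	rtnl_unlock();
 *	free_netdev(dev);
 */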

#ifdef CONFIG_HOTPLUG_CPU
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct net_device **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_sysfs_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
		INIT_HLIST_HEAD(&dev_name_head[i]);

	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
		INIT_HLIST_HEAD(&dev_index_head[i]);

	/*
	 *	Initialise the packet receive queues.
	 */

	for (i = 0; i < NR_CPUS; i++) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->throttle = 0;
		queue->cng_level = 0;
		queue->avg_blog = 10; /* arbitrary non-zero */
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = weight_p;
		queue->backlog_dev.poll = process_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

#ifdef OFFLINE_SAMPLE
	samp_timer.expires = jiffies + (10 * HZ);
	add_timer(&samp_timer);
#endif

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get);
EXPORT_SYMBOL(__dev_get_by_flags);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(__skb_linearize);
EXPORT_SYMBOL(call_netdevice_notifiers);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_getbyhwaddr);
EXPORT_SYMBOL(dev_ioctl);
EXPORT_SYMBOL(dev_new_index);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_queue_xmit_nit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
/* for 802.1Q VLAN support */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
EXPORT_SYMBOL(dev_change_flags);
#endif
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
EXPORT_SYMBOL(netdev_dropping);
EXPORT_SYMBOL(netdev_fc_xoff);
EXPORT_SYMBOL(netdev_register_fc);
EXPORT_SYMBOL(netdev_unregister_fc);
#endif
#ifdef CONFIG_NET_FASTROUTE
EXPORT_SYMBOL(netdev_fastroute);
EXPORT_SYMBOL(netdev_fastroute_obstacles);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);