Linux 2.4.0-test7-pre7
[davej-history.git] / net / core / dev.c
bloba6ee7367aa6f558205e946de8446e1918a1436fd
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
22 * Changes:
23 * Alan Cox : device private ioctl copies fields back.
24 * Alan Cox : Transmit queue code does relevant stunts to
25 * keep the queue safe.
26 * Alan Cox : Fixed double lock.
27 * Alan Cox : Fixed promisc NULL pointer trap
28 * ???????? : Support the full private ioctl range
29 * Alan Cox : Moved ioctl permission check into drivers
30 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
31 * Alan Cox : 100 backlog just doesn't cut it when
32 * you start doing multicast video 8)
33 * Alan Cox : Rewrote net_bh and list manager.
34 * Alan Cox : Fix ETH_P_ALL echoback lengths.
35 * Alan Cox : Took out transmit every packet pass
36 * Saved a few bytes in the ioctl handler
37 * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
38 * a function call a packet.
39 * Alan Cox : Hashed net_bh()
40 * Richard Kooijman: Timestamp fixes.
41 * Alan Cox : Wrong field in SIOCGIFDSTADDR
42 * Alan Cox : Device lock protection.
43 * Alan Cox : Fixed nasty side effect of device close changes.
44 * Rudi Cilibrasi : Pass the right thing to set_mac_address()
45 * Dave Miller : 32bit quantity for the device lock to make it work out
46 * on a Sparc.
47 * Bjorn Ekwall : Added KERNELD hack.
48 * Alan Cox : Cleaned up the backlog initialise.
49 * Craig Metz : SIOCGIFCONF fix if space for under
50 * 1 device.
51 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
52 * is no device open function.
53 * Andi Kleen : Fix error reporting for SIOCGIFCONF
54 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
55 * Cyrus Durgin : Cleaned for KMOD
56 * Adam Sulmicki : Bug Fix : Network Device Unload
57 * A network device unload needs to purge
58 * the backlog queue.
59 * Paul Rusty Russell : SIOCSIFNAME
60 * Pekka Riikonen : Netdev boot-time settings code
61 * Andrew Morton : Make unregister_netdevice wait indefinitely on dev->refcnt
64 #include <asm/uaccess.h>
65 #include <asm/system.h>
66 #include <asm/bitops.h>
67 #include <linux/config.h>
68 #include <linux/types.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/string.h>
72 #include <linux/mm.h>
73 #include <linux/socket.h>
74 #include <linux/sockios.h>
75 #include <linux/errno.h>
76 #include <linux/interrupt.h>
77 #include <linux/if_ether.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/notifier.h>
81 #include <linux/skbuff.h>
82 #include <linux/brlock.h>
83 #include <net/sock.h>
84 #include <linux/rtnetlink.h>
85 #include <linux/proc_fs.h>
86 #include <linux/stat.h>
87 #include <linux/if_bridge.h>
88 #include <net/dst.h>
89 #include <net/pkt_sched.h>
90 #include <net/profile.h>
91 #include <linux/init.h>
92 #include <linux/kmod.h>
93 #if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
94 #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
95 #endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
96 #ifdef CONFIG_PLIP
97 extern int plip_init(void);
98 #endif
100 NET_PROFILE_DEFINE(dev_queue_xmit)
101 NET_PROFILE_DEFINE(softnet_process)
103 const char *if_port_text[] = {
104 "unknown",
105 "BNC",
106 "10baseT",
107 "AUI",
108 "100baseT",
109 "100baseTX",
110 "100baseFX"
114 * The list of packet types we will receive (as opposed to discard)
115 * and the routines to invoke.
117 * Why 16. Because with 16 the only overlap we get on a hash of the
118 * low nibble of the protocol value is RARP/SNAP/X.25.
120 * 0800 IP
121 * 0001 802.3
122 * 0002 AX.25
123 * 0004 802.2
124 * 8035 RARP
125 * 0005 SNAP
126 * 0805 X.25
127 * 0806 ARP
128 * 8137 IPX
129 * 0009 Localtalk
130 * 86DD IPv6
133 static struct packet_type *ptype_base[16]; /* 16 way hashed list */
134 static struct packet_type *ptype_all = NULL; /* Taps */
137 * Our notifier list
140 static struct notifier_block *netdev_chain=NULL;
143 * Device drivers call our routines to queue packets here. We empty the
144 * queue in the local softnet handler.
146 struct softnet_data softnet_data[NR_CPUS] __cacheline_aligned;
148 #ifdef CONFIG_NET_FASTROUTE
149 int netdev_fastroute;
150 int netdev_fastroute_obstacles;
151 #endif
154 /******************************************************************************************
156 Protocol management and registration routines
158 *******************************************************************************************/
161 * For efficiency
164 int netdev_nit=0;
167 * Add a protocol ID to the list. Now that the input handler is
168 * smarter we can dispense with all the messy stuff that used to be
169 * here.
171 * BEWARE!!! Protocol handlers, mangling input packets,
172 * MUST BE last in hash buckets and checking protocol handlers
173 * MUST start from promiscous ptype_all chain in net_bh.
174 * It is true now, do not change it.
175 * Explantion follows: if protocol handler, mangling packet, will
176 * be the first on list, it is not able to sense, that packet
177 * is cloned and should be copied-on-write, so that it will
178 * change it and subsequent readers will get broken packet.
179 * --ANK (980803)
183 * dev_add_pack - add packet handler
184 * @pt: packet type declaration
186 * Add a protocol handler to the networking stack. The passed &packet_type
187 * is linked into kernel lists and may not be freed until it has been
188 * removed from the kernel lists.
/* dev_add_pack: register a protocol handler.
 * ETH_P_ALL handlers ("taps") are pushed onto the ptype_all chain and
 * counted in netdev_nit; all other types are pushed onto a 16-way hash
 * chain keyed by the low nibble of the (host-order) protocol value.
 * Insertion is LIFO, and the whole operation is serialized against the
 * receive path by the BR_NETPROTO_LOCK big-reader lock. */
191 void dev_add_pack(struct packet_type *pt)
193 int hash;
195 br_write_lock_bh(BR_NETPROTO_LOCK);
197 #ifdef CONFIG_NET_FASTROUTE
198 /* Hack to detect packet socket */
/* A non-NULL pt->data is treated as "this handler can see/mangle raw
 * frames", which is incompatible with the fastroute path, so fastroute
 * state is torn down and an obstacle counted. */
199 if (pt->data) {
200 netdev_fastroute_obstacles++;
201 dev_clear_fastroute(pt->dev);
203 #endif
204 if (pt->type == htons(ETH_P_ALL)) {
205 netdev_nit++;
206 pt->next=ptype_all;
207 ptype_all=pt;
208 } else {
209 hash=ntohs(pt->type)&15;
210 pt->next = ptype_base[hash];
211 ptype_base[hash] = pt;
213 br_write_unlock_bh(BR_NETPROTO_LOCK);
218 * dev_remove_pack - remove packet handler
219 * @pt: packet type declaration
221 * Remove a protocol handler that was previously added to the kernel
222 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
223 * from the kernel lists and can be freed or reused once this function
224 * returns.
/* dev_remove_pack: unlink a handler previously added by dev_add_pack().
 * Picks the chain (ptype_all vs. the hash bucket) from pt->type, then
 * walks it with a pointer-to-pointer so unlinking needs no "prev" node.
 * Warns if the handler was never registered. */
227 void dev_remove_pack(struct packet_type *pt)
229 struct packet_type **pt1;
231 br_write_lock_bh(BR_NETPROTO_LOCK);
233 if (pt->type == htons(ETH_P_ALL)) {
234 netdev_nit--;
235 pt1=&ptype_all;
236 } else {
237 pt1=&ptype_base[ntohs(pt->type)&15];
240 for (; (*pt1) != NULL; pt1 = &((*pt1)->next)) {
241 if (pt == (*pt1)) {
242 *pt1 = pt->next;
243 #ifdef CONFIG_NET_FASTROUTE
/* Mirror of the obstacle accounting done in dev_add_pack(). */
244 if (pt->data)
245 netdev_fastroute_obstacles--;
246 #endif
247 br_write_unlock_bh(BR_NETPROTO_LOCK);
248 return;
251 br_write_unlock_bh(BR_NETPROTO_LOCK);
252 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
255 /******************************************************************************
257 Device Boot-time Settings Routines
259 *******************************************************************************/
261 /* Boot time configuration table */
262 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
265 * netdev_boot_setup_add - add new setup entry
266 * @name: name of the device
267 * @map: configured settings for the device
269 * Adds new setup entry to the dev_boot_setup list. The function
270 * returns 0 on error and 1 on success. This is a generic routine to
271 * all netdevices.
273 int netdev_boot_setup_add(char *name, struct ifmap *map)
275 struct netdev_boot_setup *s;
276 int i;
278 s = dev_boot_setup;
279 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
280 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
281 memset(s[i].name, 0, sizeof(s[i].name));
282 strcpy(s[i].name, name);
283 memcpy(&s[i].map, map, sizeof(s[i].map));
284 break;
288 if (i >= NETDEV_BOOT_SETUP_MAX)
289 return 0;
291 return 1;
295 * netdev_boot_setup_check - check boot time settings
296 * @dev: the netdevice
298 * Check boot time settings for the device. If device's name is a
299 * mask (eg. eth%d) and settings are found then this will allocate
300 * name for the device. The found settings are set for the device
301 * to be used later in the device probing. Returns 0 if no settings
302 * found, 1 if they are.
/* netdev_boot_setup_check: apply boot-time settings to a device.
 * If dev->name contains '%' it is a template (e.g. "eth%d"); the buffer
 * is cut at the '%' and matched as a prefix against the stored names.
 * On a match whose name is not already taken, the stored name, irq,
 * base_addr and memory range are copied into @dev and 1 is returned;
 * otherwise 0. */
304 int netdev_boot_setup_check(struct net_device *dev)
306 struct netdev_boot_setup *s;
307 char buf[IFNAMSIZ + 1];
308 int i, mask = 0;
310 memset(buf, 0, sizeof(buf));
311 strcpy(buf, dev->name);
312 if (strchr(dev->name, '%')) {
/* Truncate the local copy at the wildcard so prefix matching works. */
313 *strchr(buf, '%') = '\0';
314 mask = 1;
317 s = dev_boot_setup;
318 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
319 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
320 !strncmp(buf, s[i].name, mask ? strlen(buf) :
321 strlen(s[i].name))) {
/* Exact-name entries already registered mean "no settings";
 * wildcard matches just try the next table entry. */
322 if (__dev_get_by_name(s[i].name)) {
323 if (!mask)
324 return 0;
325 continue;
327 memset(dev->name, 0, IFNAMSIZ);
328 strcpy(dev->name, s[i].name);
329 dev->irq = s[i].map.irq;
330 dev->base_addr = s[i].map.base_addr;
331 dev->mem_start = s[i].map.mem_start;
332 dev->mem_end = s[i].map.mem_end;
333 return 1;
337 return 0;
341 * Saves at boot time configured settings for any netdevice.
/* netdev_boot_setup: parse a "netdev=irq,base,mem_start,mem_end,name"
 * boot argument.  Unspecified fields stay -1 (the memset below), so
 * later probing can tell "configured" from "left alone".  Returns the
 * result of netdev_boot_setup_add(): 1 stored, 0 table full. */
343 static int __init netdev_boot_setup(char *str)
345 int ints[5];
346 struct ifmap map;
348 str = get_options(str, ARRAY_SIZE(ints), ints);
349 if (!str || !*str)
350 return 0;
352 /* Save settings */
353 memset(&map, -1, sizeof(map));
354 if (ints[0] > 0)
355 map.irq = ints[1];
356 if (ints[0] > 1)
357 map.base_addr = ints[2];
358 if (ints[0] > 2)
359 map.mem_start = ints[3];
360 if (ints[0] > 3)
361 map.mem_end = ints[4];
363 /* Add new entry to the list */
364 return netdev_boot_setup_add(str, &map);
367 __setup("netdev=", netdev_boot_setup);
369 /*****************************************************************************************
371 Device Interface Subroutines
373 ******************************************************************************************/
376 * __dev_get_by_name - find a device by its name
377 * @name: name to find
379 * Find an interface by name. Must be called under RTNL semaphore
380 * or @dev_base_lock. If the name is found a pointer to the device
381 * is returned. If the name is not found then %NULL is returned. The
382 * reference counters are not incremented so the caller must be
383 * careful with locks.
387 struct net_device *__dev_get_by_name(const char *name)
389 struct net_device *dev;
391 for (dev = dev_base; dev != NULL; dev = dev->next) {
392 if (strcmp(dev->name, name) == 0)
393 return dev;
395 return NULL;
399 * dev_get_by_name - find a device by its name
400 * @name: name to find
402 * Find an interface by name. This can be called from any
403 * context and does its own locking. The returned handle has
404 * the usage count incremented and the caller must use dev_put() to
405 * release it when it is no longer needed. %NULL is returned if no
406 * matching device is found.
409 struct net_device *dev_get_by_name(const char *name)
411 struct net_device *dev;
413 read_lock(&dev_base_lock);
414 dev = __dev_get_by_name(name);
415 if (dev)
416 dev_hold(dev);
417 read_unlock(&dev_base_lock);
418 return dev;
422 Return value is changed to int to prevent illegal usage in future.
423 It is still legal to use to check for device existance.
425 User should understand, that the result returned by this function
426 is meaningless, if it was not issued under rtnl semaphore.
430 * dev_get - test if a device exists
431 * @name: name to test for
433 * Test if a name exists. Returns true if the name is found. In order
434 * to be sure the name is not allocated or removed during the test the
435 * caller must hold the rtnl semaphore.
437 * This function primarily exists for back compatibility with older
438 * drivers.
441 int dev_get(const char *name)
443 struct net_device *dev;
445 read_lock(&dev_base_lock);
446 dev = __dev_get_by_name(name);
447 read_unlock(&dev_base_lock);
448 return dev != NULL;
452 * __dev_get_by_index - find a device by its ifindex
453 * @ifindex: index of device
455 * Search for an interface by index. Returns %NULL if the device
456 * is not found or a pointer to the device. The device has not
457 * had its reference counter increased so the caller must be careful
458 * about locking. The caller must hold either the RTNL semaphore
459 * or @dev_base_lock.
462 struct net_device * __dev_get_by_index(int ifindex)
464 struct net_device *dev;
466 for (dev = dev_base; dev != NULL; dev = dev->next) {
467 if (dev->ifindex == ifindex)
468 return dev;
470 return NULL;
475 * dev_get_by_index - find a device by its ifindex
476 * @ifindex: index of device
478 * Search for an interface by index. Returns NULL if the device
479 * is not found or a pointer to the device. The device returned has
480 * had a reference added and the pointer is safe until the user calls
481 * dev_put to indicate they have finished with it.
484 struct net_device * dev_get_by_index(int ifindex)
486 struct net_device *dev;
488 read_lock(&dev_base_lock);
489 dev = __dev_get_by_index(ifindex);
490 if (dev)
491 dev_hold(dev);
492 read_unlock(&dev_base_lock);
493 return dev;
497 * dev_getbyhwaddr - find a device by its hardware addres
498 * @type: media type of device
499 * @ha: hardware address
501 * Search for an interface by MAC address. Returns NULL if the device
502 * is not found or a pointer to the device. The caller must hold the
503 * rtnl semaphore. The returned device has not had its ref count increased
504 * and the caller must therefore be careful about locking
506 * BUGS:
507 * If the API was consistent this would be __dev_get_by_hwaddr
/* dev_getbyhwaddr: find a device by media type and hardware (MAC)
 * address.  Caller must hold RTNL (asserted); the returned device has
 * no extra reference taken.  Compares addr_len bytes, so @ha must be
 * at least that long for the given @type. */
510 struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
512 struct net_device *dev;
514 ASSERT_RTNL();
516 for (dev = dev_base; dev != NULL; dev = dev->next) {
517 if (dev->type == type &&
518 memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
519 return dev;
521 return NULL;
525 * dev_alloc_name - allocate a name for a device
526 * @dev: device
527 * @name: name format string
529 * Passed a format string - eg "lt%d" it will try and find a suitable
530 * id. Not efficient for many devices, not called a lot. The caller
531 * must hold the dev_base or rtnl lock while allocating the name and
532 * adding the device in order to avoid duplicates. Returns the number
533 * of the unit assigned or a negative errno code.
536 int dev_alloc_name(struct net_device *dev, const char *name)
538 int i;
539 char buf[32];
542 * If you need over 100 please also fix the algorithm...
544 for (i = 0; i < 100; i++) {
545 sprintf(buf,name,i);
546 if (__dev_get_by_name(buf) == NULL) {
547 strcpy(dev->name, buf);
548 return i;
551 return -ENFILE; /* Over 100 of the things .. bail out! */
555 * dev_alloc - allocate a network device and name
556 * @name: name format string
557 * @err: error return pointer
559 * Passed a format string, eg. "lt%d", it will allocate a network device
560 * and space for the name. %NULL is returned if no memory is available.
561 * If the allocation succeeds then the name is assigned and the
562 * device pointer returned. %NULL is returned if the name allocation
563 * failed. The cause of an error is returned as a negative errno code
564 * in the variable @err points to.
566 * The caller must hold the @dev_base or RTNL locks when doing this in
567 * order to avoid duplicate name allocations.
/* dev_alloc: kmalloc a zeroed net_device and name it via
 * dev_alloc_name().  On failure returns NULL with the negative errno
 * in *err (-ENOBUFS for allocation failure, or dev_alloc_name()'s
 * error).  Caller must hold dev_base/RTNL to keep the name unique. */
570 struct net_device *dev_alloc(const char *name, int *err)
572 struct net_device *dev=kmalloc(sizeof(struct net_device), GFP_KERNEL);
573 if (dev == NULL) {
574 *err = -ENOBUFS;
575 return NULL;
577 memset(dev, 0, sizeof(struct net_device));
578 *err = dev_alloc_name(dev, name);
579 if (*err < 0) {
580 kfree(dev);
581 return NULL;
583 return dev;
587 * netdev_state_change - device changes state
588 * @dev: device to cause notification
590 * Called to indicate a device has changed state. This function calls
591 * the notifier chains for netdev_chain and sends a NEWLINK message
592 * to the routing socket.
/* netdev_state_change: announce a state change on an UP device by
 * firing NETDEV_CHANGE on the notifier chain and sending an
 * RTM_NEWLINK message to the routing socket.  A nop for devices that
 * are down. */
595 void netdev_state_change(struct net_device *dev)
597 if (dev->flags&IFF_UP) {
598 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
599 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
604 #ifdef CONFIG_KMOD
607 * dev_load - load a network module
608 * @name: name of interface
610 * If a network interface is not present and the process has suitable
611 * privileges this function loads the module. If module loading is not
612 * available in this kernel then it becomes a nop.
/* dev_load: if no interface with @name exists and the caller has
 * CAP_SYS_MODULE, ask kmod to load a module of that name.  Compiled
 * away to a nop when CONFIG_KMOD is off (see the #else branch). */
615 void dev_load(const char *name)
617 if (!__dev_get_by_name(name) && capable(CAP_SYS_MODULE))
618 request_module(name);
621 #else
623 extern inline void dev_load(const char *unused){;}
625 #endif
/* default_rebuild_header: fallback rebuild_header op.  Reaching it is
 * a driver bug, so it logs, frees the skb and returns 1 (failure). */
627 static int default_rebuild_header(struct sk_buff *skb)
629 printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", skb->dev ? skb->dev->name : "NULL!!!");
630 kfree_skb(skb);
631 return 1;
635 * dev_open - prepare an interface for use.
636 * @dev: device to open
638 * Takes a device from down to up state. The device's private open
639 * function is invoked and then the multicast lists are loaded. Finally
640 * the device is moved into the up state and a %NETDEV_UP message is
641 * sent to the netdev notifier chain.
643 * Calling this function on an active interface is a nop. On a failure
644 * a negative errno code is returned.
/* dev_open: bring an interface up.  Nop if already up; -ENODEV if the
 * hardware is absent.  Otherwise calls the driver's open method and,
 * on success, sets IFF_UP and __LINK_STATE_START, uploads the
 * multicast list, activates the qdisc and fires NETDEV_UP.  Returns
 * the driver's error code on failure. */
647 int dev_open(struct net_device *dev)
649 int ret = 0;
652 * Is it already up?
655 if (dev->flags&IFF_UP)
656 return 0;
659 * Is it even present?
661 if (!netif_device_present(dev))
662 return -ENODEV;
665 * Call device private open method
668 if (dev->open)
669 ret = dev->open(dev);
672 * If it went open OK then:
675 if (ret == 0)
678 * Set the flags.
680 dev->flags |= IFF_UP;
682 set_bit(__LINK_STATE_START, &dev->state);
685 * Initialize multicasting status
687 dev_mc_upload(dev);
690 * Wakeup transmit queue engine
692 dev_activate(dev);
695 * ... and announce new interface.
697 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
699 return(ret);
702 #ifdef CONFIG_NET_FASTROUTE
/* dev_do_clear_fastroute: drop every cached fastpath dst on one
 * device.  Each slot is detached under fastpath_lock, but the dst is
 * released only after the lock is dropped, so dst_release() never runs
 * with the lock held. */
704 static void dev_do_clear_fastroute(struct net_device *dev)
706 if (dev->accept_fastpath) {
707 int i;
709 for (i=0; i<=NETDEV_FASTROUTE_HMASK; i++) {
710 struct dst_entry *dst;
712 write_lock_irq(&dev->fastpath_lock);
713 dst = dev->fastpath[i];
714 dev->fastpath[i] = NULL;
715 write_unlock_irq(&dev->fastpath_lock);
717 dst_release(dst);
/* dev_clear_fastroute: clear the fastroute cache of one device, or of
 * every registered device when called with @dev == NULL (walks
 * dev_base under dev_base_lock). */
722 void dev_clear_fastroute(struct net_device *dev)
724 if (dev) {
725 dev_do_clear_fastroute(dev);
726 } else {
727 read_lock(&dev_base_lock);
728 for (dev = dev_base; dev; dev = dev->next)
729 dev_do_clear_fastroute(dev);
730 read_unlock(&dev_base_lock);
733 #endif
736 * dev_close - shutdown an interface.
737 * @dev: device to shutdown
739 * This function moves an active device into down state. A
740 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
741 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
742 * chain.
/* dev_close: take an interface down.  Nop when already down.
 * Order matters: NETDEV_GOING_DOWN warns users first, the qdisc is
 * deactivated and __LINK_STATE_START cleared before the driver's stop
 * method runs, then IFF_UP is cleared, fastroute state flushed, and
 * NETDEV_DOWN announced.  Always returns 0. */
745 int dev_close(struct net_device *dev)
747 if (!(dev->flags&IFF_UP))
748 return 0;
751 * Tell people we are going down, so that they can
752 * prepare to death, when device is still operating.
754 notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
756 dev_deactivate(dev);
758 clear_bit(__LINK_STATE_START, &dev->state);
761 * Call the device specific close. This cannot fail.
762 * Only if device is UP
764 * We allow it to be called even after a DETACH hot-plug
765 * event.
768 if (dev->stop)
769 dev->stop(dev);
772 * Device is now down.
775 dev->flags &= ~IFF_UP;
776 #ifdef CONFIG_NET_FASTROUTE
777 dev_clear_fastroute(dev);
778 #endif
781 * Tell people we are down
783 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
785 return(0);
790 * Device change register/unregister. These are not inline or static
791 * as we export them to the world.
795 * register_netdevice_notifier - register a network notifier block
796 * @nb: notifier
798 * Register a notifier to be called when network device events occur.
799 * The notifier passed is linked into the kernel structures and must
800 * not be reused until it has been unregistered. A negative errno code
801 * is returned on a failure.
/* register_netdevice_notifier: thin wrapper adding @nb to the
 * netdev_chain notifier list; returns notifier_chain_register()'s
 * error code. */
804 int register_netdevice_notifier(struct notifier_block *nb)
806 return notifier_chain_register(&netdev_chain, nb);
810 * unregister_netdevice_notifier - unregister a network notifier block
811 * @nb: notifier
813 * Unregister a notifier previously registered by
814 * register_netdevice_notifier(). The notifier is unlinked into the
815 * kernel structures and may then be reused. A negative errno code
816 * is returned on a failure.
/* unregister_netdevice_notifier: thin wrapper removing @nb from the
 * netdev_chain notifier list; returns notifier_chain_unregister()'s
 * error code. */
819 int unregister_netdevice_notifier(struct notifier_block *nb)
821 return notifier_chain_unregister(&netdev_chain,nb);
825 * Support routine. Sends outgoing frames to any network
826 * taps currently in use.
829 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
831 struct packet_type *ptype;
832 get_fast_time(&skb->stamp);
834 br_read_lock(BR_NETPROTO_LOCK);
835 for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
837 /* Never send packets back to the socket
838 * they originated from - MvS (miquels@drinkel.ow.org)
840 if ((ptype->dev == dev || !ptype->dev) &&
841 ((struct sock *)ptype->data != skb->sk))
843 struct sk_buff *skb2;
844 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
845 break;
847 /* skb->nh should be correctly
848 set by sender, so that the second statement is
849 just protection against buggy protocols.
851 skb2->mac.raw = skb2->data;
853 if (skb2->nh.raw < skb2->data || skb2->nh.raw >= skb2->tail) {
854 if (net_ratelimit())
855 printk(KERN_DEBUG "protocol %04x is buggy, dev %s\n", skb2->protocol, dev->name);
856 skb2->nh.raw = skb2->data;
857 if (dev->hard_header)
858 skb2->nh.raw += dev->hard_header_len;
861 skb2->h.raw = skb2->nh.raw;
862 skb2->pkt_type = PACKET_OUTGOING;
863 skb2->rx_dev = skb->dev;
864 dev_hold(skb2->rx_dev);
865 ptype->func(skb2, skb->dev, ptype);
868 br_read_unlock(BR_NETPROTO_LOCK);
872 * Fast path for loopback frames.
/* dev_loopback_xmit: fast path for loopback frames.  Clones the skb,
 * pulls it back to the network header, marks it PACKET_LOOPBACK with
 * checksumming skipped, and feeds it straight to netif_rx().  Silently
 * drops the frame if the clone fails. */
875 void dev_loopback_xmit(struct sk_buff *skb)
877 struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
878 if (newskb==NULL)
879 return;
881 newskb->mac.raw = newskb->data;
882 skb_pull(newskb, newskb->nh.raw - newskb->data);
883 newskb->pkt_type = PACKET_LOOPBACK;
884 newskb->ip_summed = CHECKSUM_UNNECESSARY;
/* Looped-back packets are expected to carry a route already. */
885 if (newskb->dst==NULL)
886 printk(KERN_DEBUG "BUG: packet without dst looped back 1\n");
887 netif_rx(newskb);
891 * dev_queue_xmit - transmit a buffer
892 * @skb: buffer to transmit
894 * Queue a buffer for transmission to a network device. The caller must
895 * have set the device and priority and built the buffer before calling this
896 * function. The function can be called from an interrupt.
898 * A negative errno code is returned on a failure. A success does not
899 * guarantee the frame will be transmitted as it may be dropped due
900 * to congestion or traffic shaping.
/* dev_queue_xmit: queue a buffer for transmission on skb->dev.
 * With a real qdisc (q->enqueue set): enqueue under queue_lock, kick
 * qdisc_run(), and map NET_XMIT_BYPASS to success for the caller.
 * Queueless (software) devices transmit directly under xmit_lock,
 * with xmit_lock_owner used to detect recursive calls from the same
 * CPU (a dead loop on a virtual device).  Note the lock hand-off:
 * queue_lock is dropped before xmit_lock is taken. */
903 int dev_queue_xmit(struct sk_buff *skb)
905 struct net_device *dev = skb->dev;
906 struct Qdisc *q;
908 /* Grab device queue */
909 spin_lock_bh(&dev->queue_lock);
910 q = dev->qdisc;
911 if (q->enqueue) {
912 int ret = q->enqueue(skb, q);
914 qdisc_run(dev);
916 spin_unlock_bh(&dev->queue_lock);
917 return ret == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : ret;
920 /* The device has no queue. Common case for software devices:
921 loopback, all the sorts of tunnels...
923 Really, it is unlikely that xmit_lock protection is necessary here.
924 (f.e. loopback and IP tunnels are clean ignoring statistics counters.)
925 However, it is possible, that they rely on protection
926 made by us here.
928 Check this and shot the lock. It is not prone from deadlocks.
929 Either shot noqueue qdisc, it is even simpler 8)
931 if (dev->flags&IFF_UP) {
932 int cpu = smp_processor_id();
934 if (dev->xmit_lock_owner != cpu) {
935 spin_unlock(&dev->queue_lock);
936 spin_lock(&dev->xmit_lock);
937 dev->xmit_lock_owner = cpu;
939 if (!netif_queue_stopped(dev)) {
/* Feed taps (packet sockets) before the hardware send. */
940 if (netdev_nit)
941 dev_queue_xmit_nit(skb,dev);
943 if (dev->hard_start_xmit(skb, dev) == 0) {
944 dev->xmit_lock_owner = -1;
945 spin_unlock_bh(&dev->xmit_lock);
946 return 0;
/* A queueless device asked us to queue: nowhere to put it. */
949 dev->xmit_lock_owner = -1;
950 spin_unlock_bh(&dev->xmit_lock);
951 if (net_ratelimit())
952 printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
953 kfree_skb(skb);
954 return -ENETDOWN;
955 } else {
956 /* Recursion is detected! It is possible, unfortunately */
957 if (net_ratelimit())
958 printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name);
961 spin_unlock_bh(&dev->queue_lock);
963 kfree_skb(skb);
964 return -ENETDOWN;
968 /*=======================================================================
969 Receiver routines
970 =======================================================================*/
972 int netdev_max_backlog = 300;
974 struct netif_rx_stats netdev_rx_stat[NR_CPUS];
977 #ifdef CONFIG_NET_HW_FLOWCONTROL
978 static atomic_t netdev_dropping = ATOMIC_INIT(0);
979 static unsigned long netdev_fc_mask = 1;
980 unsigned long netdev_fc_xoff = 0;
981 spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;
983 static struct
985 void (*stimul)(struct net_device *);
986 struct net_device *dev;
987 } netdev_fc_slots[32];
/* netdev_register_fc: claim a hardware flow-control slot.  Finds the
 * first zero bit in netdev_fc_mask, records the device and its
 * "stimulate transmit" callback there, and returns the slot number
 * (bit index).  Returns 0 when all 32 slots are taken — note slot 0
 * is therefore ambiguous with failure. */
989 int netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev))
991 int bit = 0;
992 unsigned long flags;
994 spin_lock_irqsave(&netdev_fc_lock, flags);
995 if (netdev_fc_mask != ~0UL) {
996 bit = ffz(netdev_fc_mask);
997 netdev_fc_slots[bit].stimul = stimul;
998 netdev_fc_slots[bit].dev = dev;
999 set_bit(bit, &netdev_fc_mask);
1000 clear_bit(bit, &netdev_fc_xoff);
1002 spin_unlock_irqrestore(&netdev_fc_lock, flags);
1003 return bit;
/* netdev_unregister_fc: release a flow-control slot obtained from
 * netdev_register_fc().  Ignores bit 0 (the failure/ambiguous slot)
 * and clears both the allocation mask and any pending xoff bit. */
1006 void netdev_unregister_fc(int bit)
1008 unsigned long flags;
1010 spin_lock_irqsave(&netdev_fc_lock, flags);
1011 if (bit > 0) {
1012 netdev_fc_slots[bit].stimul = NULL;
1013 netdev_fc_slots[bit].dev = NULL;
1014 clear_bit(bit, &netdev_fc_mask);
1015 clear_bit(bit, &netdev_fc_xoff);
1017 spin_unlock_irqrestore(&netdev_fc_lock, flags);
/* netdev_wakeup: called when receive throttling ends.  Atomically
 * grabs-and-clears the xoff bitmap, then invokes each xoff'd slot's
 * stimul() callback so the corresponding driver restarts reception/
 * transmission. */
1020 static void netdev_wakeup(void)
1022 unsigned long xoff;
1024 spin_lock(&netdev_fc_lock);
1025 xoff = netdev_fc_xoff;
1026 netdev_fc_xoff = 0;
1027 while (xoff) {
/* ffz(~xoff) == index of the lowest set bit in xoff. */
1028 int i = ffz(~xoff);
1029 xoff &= ~(1<<i);
1030 netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
1032 spin_unlock(&netdev_fc_lock);
1034 #endif
1037 * netif_rx - post buffer to the network code
1038 * @skb: buffer to post
1040 * This function receives a packet from a device driver and queues it for
1041 * the upper (protocol) levels to process. It always succeeds. The buffer
1042 * may be dropped during processing for congestion control or by the
1043 * protocol layers.
/* netif_rx: driver entry point for received packets.  Timestamps the
 * skb if the driver didn't, then enqueues it on this CPU's softnet
 * backlog and raises NET_RX_SOFTIRQ.  Congestion handling: once the
 * queue passes netdev_max_backlog the CPU enters "throttle" and drops
 * everything until the queue fully drains; with hardware flow control
 * compiled in, throttle transitions also drive netdev_dropping /
 * netdev_wakeup().  Interrupts are disabled around queue access since
 * this can be called from IRQ context. */
1046 void netif_rx(struct sk_buff *skb)
1048 int this_cpu = smp_processor_id();
1049 struct softnet_data *queue;
1050 unsigned long flags;
1052 if (skb->stamp.tv_sec == 0)
1053 get_fast_time(&skb->stamp);
1055 /* The code is rearranged so that the path is the most
1056 short when CPU is congested, but is still operating.
1058 queue = &softnet_data[this_cpu];
1060 local_irq_save(flags);
1062 netdev_rx_stat[this_cpu].total++;
1063 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1064 if (queue->input_pkt_queue.qlen) {
1065 if (queue->throttle)
1066 goto drop;
1068 enqueue:
/* Remember the receiving device, swapping any stale rx_dev ref. */
1069 if (skb->rx_dev)
1070 dev_put(skb->rx_dev);
1071 skb->rx_dev = skb->dev;
1072 dev_hold(skb->rx_dev);
1073 __skb_queue_tail(&queue->input_pkt_queue,skb);
1074 __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
1075 local_irq_restore(flags);
1076 return;
/* Queue is empty again: leave throttle mode before enqueueing. */
1079 if (queue->throttle) {
1080 queue->throttle = 0;
1081 #ifdef CONFIG_NET_HW_FLOWCONTROL
1082 if (atomic_dec_and_test(&netdev_dropping))
1083 netdev_wakeup();
1084 #endif
1086 goto enqueue;
/* Backlog exceeded: enter throttle mode and drop. */
1089 if (queue->throttle == 0) {
1090 queue->throttle = 1;
1091 netdev_rx_stat[this_cpu].throttled++;
1092 #ifdef CONFIG_NET_HW_FLOWCONTROL
1093 atomic_inc(&netdev_dropping);
1094 #endif
1097 drop:
1098 netdev_rx_stat[this_cpu].dropped++;
1099 local_irq_restore(flags);
1101 kfree_skb(skb);
1104 /* Deliver skb to an old protocol, which is not threaded well
1105 or which do not understand shared skbs.
/* deliver_to_old_ones: hand an skb to a pre-softnet protocol handler
 * that can't deal with shared skbs or softirq concurrency.  Unless
 * this is the last delivery (@last), the skb is cloned first.  The old
 * NET_BH environment is emulated: a global spinlock serializes such
 * handlers and TIMER_BH tasklets are disabled for the duration. */
1107 static void deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last)
1109 static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
1111 if (!last) {
1112 skb = skb_clone(skb, GFP_ATOMIC);
1113 if (skb == NULL)
1114 return;
1117 /* The assumption (correct one) is that old protocols
1118 did not depened on BHs different of NET_BH and TIMER_BH.
1121 /* Emulate NET_BH with special spinlock */
1122 spin_lock(&net_bh_lock);
1124 /* Disable timers and wait for all timers completion */
1125 tasklet_disable(bh_task_vec+TIMER_BH);
1127 pt->func(skb, skb->dev, pt);
1129 tasklet_enable(bh_task_vec+TIMER_BH);
1130 spin_unlock(&net_bh_lock);
1133 /* Reparent skb to master device. This function is called
1134 * only from net_rx_action under BR_NETPROTO_LOCK. It is misuse
1135 * of BR_NETPROTO_LOCK, but it is OK for now.
/* skb_bond: if the receiving device is enslaved (dev->master set),
 * re-point skb->dev/rx_dev at the master, transferring the reference
 * (hold master, put slave).  Called only from net_rx_action under
 * BR_NETPROTO_LOCK. */
1137 static __inline__ void skb_bond(struct sk_buff *skb)
1139 struct net_device *dev = skb->rx_dev;
1141 if (dev->master) {
1142 dev_hold(dev->master);
1143 skb->dev = skb->rx_dev = dev->master;
1144 dev_put(dev);
/* net_tx_action: NET_TX_SOFTIRQ handler.  Two jobs, each done by
 * atomically stealing a per-CPU list with IRQs briefly disabled:
 * (1) free every skb on the completion queue (drivers park skbs there
 *     from IRQ context via dev_kfree_skb_irq);
 * (2) re-run the qdisc of every device on the output queue, or
 *     reschedule the device if its queue_lock is contended. */
1148 static void net_tx_action(struct softirq_action *h)
1150 int cpu = smp_processor_id();
1152 if (softnet_data[cpu].completion_queue) {
1153 struct sk_buff *clist;
1155 local_irq_disable();
1156 clist = softnet_data[cpu].completion_queue;
1157 softnet_data[cpu].completion_queue = NULL;
1158 local_irq_enable();
1160 while (clist != NULL) {
1161 struct sk_buff *skb = clist;
1162 clist = clist->next;
1164 BUG_TRAP(atomic_read(&skb->users) == 0);
1165 __kfree_skb(skb);
1169 if (softnet_data[cpu].output_queue) {
1170 struct net_device *head;
1172 local_irq_disable();
1173 head = softnet_data[cpu].output_queue;
1174 softnet_data[cpu].output_queue = NULL;
1175 local_irq_enable();
1177 while (head != NULL) {
1178 struct net_device *dev = head;
1179 head = head->next_sched;
1181 clear_bit(__LINK_STATE_SCHED, &dev->state);
/* trylock: never spin in softirq context; defer instead. */
1183 if (spin_trylock(&dev->queue_lock)) {
1184 qdisc_run(dev);
1185 spin_unlock(&dev->queue_lock);
1186 } else {
1187 netif_schedule(dev);
1194 * net_call_rx_atomic
1195 * @fn: function to call
1197 * Make a function call that is atomic with respect to the protocol
1198 * layers.
/* net_call_rx_atomic: run @fn while holding the BR_NETPROTO_LOCK
 * write lock (BHs off), i.e. atomically with respect to all protocol
 * receive processing. */
1201 void net_call_rx_atomic(void (*fn)(void))
1203 br_write_lock_bh(BR_NETPROTO_LOCK);
1204 fn();
1205 br_write_unlock_bh(BR_NETPROTO_LOCK);
1208 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
1209 void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
1210 #endif
/* handle_bridge: divert a frame to the bridging code.  First flushes
 * any pending ptype_all delivery (@pt_prev) — old-style handlers get a
 * clone via deliver_to_old_ones(), new-style ones get the shared skb
 * with an extra reference — then hands the skb to
 * br_handle_frame_hook().  Caller has already checked the hook and
 * dev->br_port are non-NULL. */
1212 static void __inline__ handle_bridge(struct sk_buff *skb,
1213 struct packet_type *pt_prev)
1215 if (pt_prev) {
1216 if (!pt_prev->data)
1217 deliver_to_old_ones(pt_prev, skb, 0);
1218 else {
1219 atomic_inc(&skb->users);
1220 pt_prev->func(skb, skb->dev, pt_prev);
1224 br_handle_frame_hook(skb);
/*
 * NET_RX_SOFTIRQ handler: drain this CPU's input_pkt_queue and deliver
 * each skb first to all ETH_P_ALL taps, then (optionally) to the bridge,
 * then to the handlers registered for its protocol.  Bails out early
 * (softnet_break) after netdev_max_backlog packets or > 1 jiffy of work
 * and reschedules itself.
 */
static void net_rx_action(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct softnet_data *queue = &softnet_data[this_cpu];
	unsigned long start_time = jiffies;
	int bugdet = netdev_max_backlog;	/* packet budget for this run */

	/* Read side of the netproto lock: protocol/bridge lists are stable. */
	br_read_lock(BR_NETPROTO_LOCK);

	for (;;) {
		struct sk_buff *skb;

		/* netif_rx() queues from irq context: disable irqs to dequeue. */
		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		local_irq_enable();

		if (skb == NULL)
			break;

		skb_bond(skb);

#ifdef CONFIG_NET_FASTROUTE
		/* Fastrouted packets were deferred from irq context; just
		 * transmit them now. */
		if (skb->pkt_type == PACKET_FASTROUTE) {
			netdev_rx_stat[this_cpu].fastroute_deferred_out++;
			dev_queue_xmit(skb);
			continue;
		}
#endif
		skb->h.raw = skb->nh.raw = skb->data;
		{
			struct packet_type *ptype, *pt_prev;
			unsigned short type = skb->protocol;

			/* pt_prev delays delivery by one handler so the last
			 * match can consume the skb without an extra clone. */
			pt_prev = NULL;
			for (ptype = ptype_all; ptype; ptype = ptype->next) {
				if (!ptype->dev || ptype->dev == skb->dev) {
					if (pt_prev) {
						if (!pt_prev->data) {
							deliver_to_old_ones(pt_prev, skb, 0);
						} else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
			/* Ports enslaved to a bridge hand the frame over
			 * instead of doing normal protocol delivery. */
			if (skb->dev->br_port != NULL &&
			    br_handle_frame_hook != NULL) {
				handle_bridge(skb, pt_prev);
				continue;
			}
#endif

			/* Per-protocol hash chain (16 buckets). */
			for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) {
				if (ptype->type == type &&
				    (!ptype->dev || ptype->dev == skb->dev)) {
					if (pt_prev) {
						if (!pt_prev->data)
							deliver_to_old_ones(pt_prev, skb, 0);
						else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

			/* Last handler gets the skb itself; no match frees it. */
			if (pt_prev) {
				if (!pt_prev->data)
					deliver_to_old_ones(pt_prev, skb, 1);
				else
					pt_prev->func(skb, skb->dev, pt_prev);
			} else
				kfree_skb(skb);
		}

		if (bugdet-- < 0 || jiffies - start_time > 1)
			goto softnet_break;
	}
	br_read_unlock(BR_NETPROTO_LOCK);

	/* Queue fully drained: lift throttling so netif_rx accepts again. */
	local_irq_disable();
	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;

softnet_break:
	br_read_unlock(BR_NETPROTO_LOCK);

	/* Out of budget/time: account it and re-raise ourselves. */
	local_irq_disable();
	netdev_rx_stat[this_cpu].time_squeeze++;
	__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;
}
/* Per-family SIOCGIFCONF dumpers, indexed by address family. */
static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */

int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family>=NPROTO)
		return -EINVAL;
	/* NOTE(review): no locking on the table; presumably registration
	 * only happens at init/module-load time -- confirm with callers. */
	gifconf_list[family] = gifconf;
	return 0;
}
1364 * Map an interface index to its name (SIOCGIFNAME)
1368 * We need this ioctl for efficient implementation of the
1369 * if_indextoname() function required by the IPv6 API. Without
1370 * it, we would have to search all the interfaces to find a
1371 * match. --pb
1374 static int dev_ifname(struct ifreq *arg)
1376 struct net_device *dev;
1377 struct ifreq ifr;
1380 * Fetch the caller's info block.
1383 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1384 return -EFAULT;
1386 read_lock(&dev_base_lock);
1387 dev = __dev_get_by_index(ifr.ifr_ifindex);
1388 if (!dev) {
1389 read_unlock(&dev_base_lock);
1390 return -ENODEV;
1393 strcpy(ifr.ifr_name, dev->name);
1394 read_unlock(&dev_base_lock);
1396 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1397 return -EFAULT;
1398 return 0;
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char *pos;	/* user buffer; NULL means "just report size" */
	int len;	/* size of the user buffer */
	int total;	/* bytes written (or required) so far */
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		for (i=0; i<NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (pos==NULL) {
					/* Size probe only. */
					done = gifconf_list[i](dev, NULL, 0);
				} else {
					done = gifconf_list[i](dev, pos+total, len-total);
				}
				/* Negative return: the copy to user space
				 * failed inside the per-family dumper. */
				if (done<0) {
					return -EFAULT;
				}
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	if (copy_to_user(arg, &ifc, sizeof(struct ifconf)))
		return -EFAULT;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return 0;
}
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */

#ifdef CONFIG_PROC_FS

/*
 * Format one /proc/net/dev line for @dev into @buffer.
 * Returns the number of characters written.
 */
static int sprintf_stats(char *buffer, struct net_device *dev)
{
	/* Drivers without a get_stats method simply have no statistics. */
	struct net_device_stats *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		/* Several related error counters are folded together into
		 * the drop/frame/carrier columns. */
		size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		      dev->name,
		      stats->rx_bytes,
		      stats->rx_packets, stats->rx_errors,
		      stats->rx_dropped + stats->rx_missed_errors,
		      stats->rx_fifo_errors,
		      stats->rx_length_errors + stats->rx_over_errors
		      + stats->rx_crc_errors + stats->rx_frame_errors,
		      stats->rx_compressed, stats->multicast,
		      stats->tx_bytes,
		      stats->tx_packets, stats->tx_errors, stats->tx_dropped,
		      stats->tx_fifo_errors, stats->collisions,
		      stats->tx_carrier_errors + stats->tx_aborted_errors
		      + stats->tx_window_errors + stats->tx_heartbeat_errors,
		      stats->tx_compressed);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
 *	to create /proc/net/dev
 */

static int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t begin = 0;
	off_t pos = 0;
	int size;
	struct net_device *dev;

	/* Column headers first. */
	size = sprintf(buffer,
		"Inter-|   Receive                                                |  Transmit\n"
		" face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n");

	pos += size;
	len += size;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		size = sprintf_stats(buffer+len, dev);
		len += size;
		pos = begin + len;

		/* Classic get_info windowing: drop text that lies entirely
		 * before @offset ... */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}
		/* ... and stop once the requested window is filled. */
		if (pos > offset + length)
			break;
	}
	read_unlock(&dev_base_lock);

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);		/* Start slop */
	if (len > length)
		len = length;			/* Ending slop */
	if (len < 0)
		len = 0;
	return len;
}
/*
 * /proc/net/softnet_stat: one line of per-CPU softnet counters, in hex.
 * Single-shot read_proc handler (*eof is always set).
 */
static int dev_proc_stats(char *buffer, char **start, off_t offset,
			  int length, int *eof, void *data)
{
	int i, lcpu;
	int len=0;

	for (lcpu=0; lcpu<smp_num_cpus; lcpu++) {
		/* Map logical CPU number to the physical stats slot. */
		i = cpu_logical_map(lcpu);
		len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       netdev_rx_stat[i].total,
			       netdev_rx_stat[i].dropped,
			       netdev_rx_stat[i].time_squeeze,
			       netdev_rx_stat[i].throttled,
			       netdev_rx_stat[i].fastroute_hit,
			       netdev_rx_stat[i].fastroute_success,
			       netdev_rx_stat[i].fastroute_defer,
			       netdev_rx_stat[i].fastroute_deferred_out,
#if 0
			       netdev_rx_stat[i].fastroute_latency_reduction
#else
			       netdev_rx_stat[i].cpu_collision
#endif
			       );
	}

	/* Trim the generated text down to the window the caller asked for. */
	len -= offset;

	if (len > length)
		len = length;
	if (len < 0)
		len = 0;

	*start = buffer + offset;
	*eof = 1;

	return len;
}

#endif	/* CONFIG_PROC_FS */
#ifdef WIRELESS_EXT
#ifdef CONFIG_PROC_FS

/*
 *	Print one entry of /proc/net/wireless
 *	This is a clone of /proc/net/dev (just above)
 */
static int sprintf_wireless_stats(char *buffer, struct net_device *dev)
{
	/* Get stats from the driver */
	struct iw_statistics *stats = (dev->get_wireless_stats ?
				       dev->get_wireless_stats(dev) :
				       (struct iw_statistics *) NULL);
	int size;

	if (stats != (struct iw_statistics *) NULL) {
		/* The '.' markers flag quality fields updated since the
		 * last read (bits 0..2 of qual.updated). */
		size = sprintf(buffer,
			       "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d\n",
			       dev->name,
			       stats->status,
			       stats->qual.qual,
			       stats->qual.updated & 1 ? '.' : ' ',
			       stats->qual.level,
			       stats->qual.updated & 2 ? '.' : ' ',
			       stats->qual.noise,
			       stats->qual.updated & 4 ? '.' : ' ',
			       stats->discard.nwid,
			       stats->discard.code,
			       stats->discard.misc);
		/* Consume the "updated" flags now that they were shown. */
		stats->qual.updated = 0;
	}
	else
		size = 0;

	return size;
}
/*
 *	Print info for /proc/net/wireless (print all entries)
 *	This is a clone of /proc/net/dev (just above)
 */
static int dev_get_wireless_info(char * buffer, char **start, off_t offset,
			  int length)
{
	int len = 0;
	off_t begin = 0;
	off_t pos = 0;
	int size;

	struct net_device * dev;

	/* Column headers first. */
	size = sprintf(buffer,
		       "Inter-| sta-|   Quality        |   Discarded packets\n"
		       " face | tus | link level noise |  nwid  crypt   misc\n"
			);

	pos += size;
	len += size;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		size = sprintf_wireless_stats(buffer + len, dev);
		len += size;
		pos = begin + len;

		/* Same windowing scheme as dev_get_info(): discard output
		 * before @offset, stop after the window is full. */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}
		if (pos > offset + length)
			break;
	}
	read_unlock(&dev_base_lock);

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);		/* Start slop */
	if (len > length)
		len = length;			/* Ending slop */
	if (len < 0)
		len = 0;

	return len;
}
#endif	/* CONFIG_PROC_FS */
#endif	/* WIRELESS_EXT */
/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */

int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		/* Refuse to steal a slave that already has a master. */
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	/* Flip the pointer under the netproto write lock so the softirq
	 * receive path (which presumably reads slave->master via skb_bond)
	 * never races with the update. */
	br_write_lock_bh(BR_NETPROTO_LOCK);
	slave->master = master;
	br_write_unlock_bh(BR_NETPROTO_LOCK);

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
/**
 *	dev_set_promiscuity	-	update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */

void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	/* Reference counted: IFF_PROMISC stays set while promiscuity != 0. */
	dev->flags |= IFF_PROMISC;
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	if (dev->flags^old_flags) {
#ifdef CONFIG_NET_FASTROUTE
		/* Promiscuous mode counts as an obstacle for fastroute. */
		if (dev->flags&IFF_PROMISC) {
			netdev_fastroute_obstacles++;
			dev_clear_fastroute(dev);
		} else
			netdev_fastroute_obstacles--;
#endif
		/* Push the new filtering mode down to the driver. */
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags&IFF_PROMISC) ? "entered" : "left");
	}
}
/**
 *	dev_set_allmulti	-	update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 */

void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	/* Reference counted: IFF_ALLMULTI stays set while allmulti != 0. */
	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	/* Only touch the driver when the visible flag actually changed. */
	if (dev->flags^old_flags)
		dev_mc_upload(dev);
}
/*
 * Apply a new flag word from user space (SIOCSIFFLAGS).  IFF_UP and the
 * reference-counted IFF_PROMISC/IFF_ALLMULTI bits get special handling;
 * returns 0 or the error from dev_open()/dev_close().
 */
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret;
	int old_flags = dev->flags;

	/*
	 *	Set the flags on our device.
	 */

	/* Only the listed bits are taken directly from the caller; the
	 * rest is preserved from the current state. */
	dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC|
			       IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	dev_mc_upload(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags^flags)&IFF_UP)	/* Bit is different  ? */
	{
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (ret == 0)
			dev_mc_upload(dev);
	}

	/* Notify protocols of the change, unless only "volatile" bits moved. */
	if (dev->flags&IFF_UP &&
	    ((old_flags^dev->flags)&~(IFF_UP|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE)))
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);

	/* gflags remembers what user space asked for; an edge on it becomes
	 * a +1/-1 on the promiscuity reference count. */
	if ((flags^dev->gflags)&IFF_PROMISC) {
		int inc = (flags&IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags^dev->gflags)&IFF_ALLMULTI) {
		int inc = (flags&IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	if (old_flags^dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags^dev->flags);

	return ret;
}
/*
 *	Perform the SIOCxIFxxx calls.
 *
 *	Called from dev_ioctl() with the appropriate lock already held:
 *	dev_base_lock (read) for the get-only commands, the RTNL semaphore
 *	for everything that mutates state.
 */

static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	struct net_device *dev;
	int err;

	if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL)
		return -ENODEV;

	switch(cmd)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			/* Report PROMISC/ALLMULTI from gflags: that is what
			 * user space set, not the reference-counted state. */
			ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING))
				|(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
			if (netif_running(dev) && netif_carrier_ok(dev))
				ifr->ifr_flags |= IFF_RUNNING;
			return 0;

		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			return -EOPNOTSUPP;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			if (ifr->ifr_mtu == dev->mtu)
				return 0;

			/*
			 *	MTU must be positive.
			 */

			if (ifr->ifr_mtu<0)
				return -EINVAL;

			if (!netif_device_present(dev))
				return -ENODEV;

			/* Give the driver a chance to veto or adapt. */
			if (dev->change_mtu)
				err = dev->change_mtu(dev, ifr->ifr_mtu);
			else {
				dev->mtu = ifr->ifr_mtu;
				err = 0;
			}
			if (!err && dev->flags&IFF_UP)
				notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
			return err;

		case SIOCGIFHWADDR:
			memcpy(ifr->ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr->ifr_hwaddr.sa_family=dev->type;
			return 0;

		case SIOCSIFHWADDR:
			if (dev->set_mac_address == NULL)
				return -EOPNOTSUPP;
			if (ifr->ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
			if (!err)
				notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
			return err;

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
			notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start=dev->mem_start;
			ifr->ifr_map.mem_end=dev->mem_end;
			ifr->ifr_map.base_addr=dev->base_addr;
			ifr->ifr_map.irq=dev->irq;
			ifr->ifr_map.dma=dev->dma;
			ifr->ifr_map.port=dev->if_port;
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev,&ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			/* AF_UNSPEC carries the raw hardware address. */
			if (dev->set_multicast_list == NULL ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if (dev->set_multicast_list == NULL ||
			    ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen<0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			/* Cannot rename a running interface. */
			if (dev->flags&IFF_UP)
				return -EBUSY;
			if (__dev_get_by_name(ifr->ifr_newname))
				return -EEXIST;
			memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
			dev->name[IFNAMSIZ-1] = 0;
			notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
			return 0;

		/*
		 *	Unknown or private ioctl
		 */

		default:
			/* Device-private range: hand straight to the driver. */
			if (cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) {
				if (dev->do_ioctl) {
					if (!netif_device_present(dev))
						return -ENODEV;
					return dev->do_ioctl(dev, ifr, cmd);
				}
				return -EOPNOTSUPP;
			}

#ifdef WIRELESS_EXT
			/* Wireless-extension ioctls also go to the driver. */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
				if (dev->do_ioctl) {
					if (!netif_device_present(dev))
						return -ENODEV;
					return dev->do_ioctl(dev, ifr, cmd);
				}
				return -EOPNOTSUPP;
			}
#endif	/* WIRELESS_EXT */
	}
	return -EINVAL;
}
/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(unsigned int cmd, void *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_shlock();
		ret = dev_ifconf((char *) arg);
		rtnl_shunlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME) {
		return dev_ifname((struct ifreq *)arg);
	}

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	/* Strip an alias suffix ("eth0:1" -> "eth0") for the lookup;
	 * the colon is restored before copying the name back out. */
	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch(cmd)
	{
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */

		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc(&ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
					return -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */

		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but currently
			   do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space. Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */

		default:
			if (cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) {
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				/* Private ioctls may return data in the ifreq. */
				if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
					return -EFAULT;
				return ret;
			}
#ifdef WIRELESS_EXT
			/* Take care of Wireless Extensions */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
				/* If command is `set a parameter', or
				 * `get the encoding parameters', check if
				 * the user has the right to do it */
				if (IW_IS_SET(cmd) || (cmd == SIOCGIWENCODE)) {
					if(!capable(CAP_NET_ADMIN))
						return -EPERM;
				}
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				if (!ret && IW_IS_GET(cmd) &&
				    copy_to_user(arg, &ifr, sizeof(struct ifreq)))
					return -EFAULT;
				return ret;
			}
#endif	/* WIRELESS_EXT */
			return -EINVAL;
	}
}
2150 * dev_new_index - allocate an ifindex
2152 * Returns a suitable unique value for a new device interface number.
2153 * The caller must hold the rtnl semaphore to be sure it remains
2154 * unique.
2157 int dev_new_index(void)
2159 static int ifindex;
2160 for (;;) {
2161 if (++ifindex <= 0)
2162 ifindex=1;
2163 if (__dev_get_by_index(ifindex) == NULL)
2164 return ifindex;
/* Nonzero until net_dev_init() has run (it clears this at the end of
 * boot-time setup); register/unregister take short-cut paths while set. */
static int dev_boot_phase = 1;
/**
 *	register_netdevice	-	register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct net_device *d, **dp;

	spin_lock_init(&dev->queue_lock);
	spin_lock_init(&dev->xmit_lock);
	dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_FASTROUTE
	dev->fastpath_lock=RW_LOCK_UNLOCKED;
#endif

	if (dev_boot_phase) {
		/* This is NOT bug, but I am not sure, that all the
		   devices, initialized before netdev module is started
		   are sane.

		   Now they are chained to device boot list
		   and probed later. If a module is initialized
		   before netdev, but assumes that dev->init
		   is really called by register_netdev(), it will fail.

		   So that this message should be printed for a while.
		 */
		printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name);

		/* Check for existence, and append to tail of chain */
		for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
			if (d == dev || strcmp(d->name, dev->name) == 0) {
				return -EEXIST;
			}
		}
		dev->next = NULL;
		write_lock_bh(&dev_base_lock);
		*dp = dev;
		dev_hold(dev);
		write_unlock_bh(&dev_base_lock);

		/*
		 *	Default initial state at registry is that the
		 *	device is present.
		 */

		set_bit(__LINK_STATE_PRESENT, &dev->state);

		/* During boot: no dev->init call, no notifier -- the device
		 * is probed later by net_dev_init(). */
		return 0;
	}

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init && dev->init(dev) != 0)
		return -EIO;

	dev->ifindex = dev_new_index();
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence, and append to tail of chain */
	for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
		if (d == dev || strcmp(d->name, dev->name) == 0) {
			return -EEXIST;
		}
	}

	/*
	 *	nil rebuild_header routine,
	 *	that should be never called and used as just bug trap.
	 */

	if (dev->rebuild_header == NULL)
		dev->rebuild_header = default_rebuild_header;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev->next = NULL;
	dev_init_scheduler(dev);
	write_lock_bh(&dev_base_lock);
	*dp = dev;
	dev_hold(dev);
	dev->deadbeaf = 0;
	write_unlock_bh(&dev_base_lock);

	/* Notify protocols, that a new device appeared. */
	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);

	return 0;
}
/**
 *	netdev_finish_unregister	-	complete unregistration
 *	@dev: device
 *
 *	Destroy and free a dead device. A value of zero is returned on
 *	success.
 */

int netdev_finish_unregister(struct net_device *dev)
{
	/* All protocol private pointers must have been released by now. */
	BUG_TRAP(dev->ip_ptr==NULL);
	BUG_TRAP(dev->ip6_ptr==NULL);
	BUG_TRAP(dev->dn_ptr==NULL);

	if (!dev->deadbeaf) {
		printk(KERN_ERR "Freeing alive device %p, %s\n", dev, dev->name);
		return 0;
	}
#ifdef NET_REFCNT_DEBUG
	printk(KERN_DEBUG "netdev_finish_unregister: %s%s.\n", dev->name, dev->new_style?"":", old style");
#endif
	if (dev->destructor)
		dev->destructor(dev);
	/* Only new-style (dynamically allocated) devices are freed here;
	 * old-style devices remain owned by their driver. */
	if (dev->new_style)
		kfree(dev);
	return 0;
}
/**
 *	unregister_netdevice	-	remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 */

int unregister_netdevice(struct net_device *dev)
{
	unsigned long now, warning_time;
	struct net_device *d, **dp;

	/* If device is running, close it first. */
	if (dev->flags & IFF_UP)
		dev_close(dev);

	BUG_TRAP(dev->deadbeaf==0);
	dev->deadbeaf = 1;

	/* And unlink it from device chain. */
	for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
		if (d == dev) {
			write_lock_bh(&dev_base_lock);
			*dp = d->next;
			write_unlock_bh(&dev_base_lock);
			break;
		}
	}
	if (d == NULL) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never was registered\n", dev->name, dev);
		return -ENODEV;
	}

	if (dev_boot_phase == 0) {
#ifdef CONFIG_NET_FASTROUTE
		dev_clear_fastroute(dev);
#endif

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

		/*
		 *	Flush the multicast chain
		 */
		dev_mc_discard(dev);
	}

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(dev->master==NULL);

	/* New-style devices have a destructor that finishes the job later;
	 * we only need to drop our reference. */
	if (dev->new_style) {
#ifdef NET_REFCNT_DEBUG
		if (atomic_read(&dev->refcnt) != 1)
			printk(KERN_DEBUG "unregister_netdevice: holding %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt)-1);
#endif
		dev_put(dev);
		return 0;
	}

	/* Last reference is our one */
	if (atomic_read(&dev->refcnt) == 1) {
		dev_put(dev);
		return 0;
	}

#ifdef NET_REFCNT_DEBUG
	printk("unregister_netdevice: waiting %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt));
#endif

	/* EXPLANATION. If dev->refcnt is not now 1 (our own reference)
	   it means that someone in the kernel still has a reference
	   to this device and we cannot release it.

	   "New style" devices have destructors, hence we can return from this
	   function and destructor will do all the work later. As of kernel 2.4.0
	   there are very few "New Style" devices.

	   "Old style" devices expect that the device is free of any references
	   upon exit from this function.
	   We cannot return from this function until all such references have
	   fallen away. This is because the caller of this function will probably
	   immediately kfree(*dev) and then be unloaded via sys_delete_module.

	   So, we linger until all references fall away. The duration of the
	   linger is basically unbounded! It is driven by, for example, the
	   current setting of sysctl_ipfrag_time.

	   After 1 second, we start to rebroadcast unregister notifications
	   in hope that careless clients will release the device.
	 */

	now = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 1) {
		if ((jiffies - now) > 1*HZ) {
			/* Rebroadcast unregister notification */
			notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
		}
		/* Sleep a quarter second and look again. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ/4);
		current->state = TASK_RUNNING;
		if ((jiffies - warning_time) > 10*HZ) {
			printk(KERN_EMERG "unregister_netdevice: waiting for %s to "
					"become free. Usage count = %d\n",
					dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
	dev_put(dev);
	return 0;
}
2428 * Initialize the DEV module. At boot time this walks the device list and
2429 * unhooks any devices that fail to initialise (normally hardware not
2430 * present) and leaves us with a valid list of present and active devices.
2434 extern void net_device_init(void);
2435 extern void ip_auto_config(void);
2437 int __init net_dev_init(void)
2439 struct net_device *dev, **dp;
2440 int i;
2442 #ifdef CONFIG_NET_SCHED
2443 pktsched_init();
2444 #endif
2447 * Initialise the packet receive queues.
2450 for (i = 0; i < NR_CPUS; i++) {
2451 struct softnet_data *queue;
2453 queue = &softnet_data[i];
2454 skb_queue_head_init(&queue->input_pkt_queue);
2455 queue->throttle = 0;
2456 queue->completion_queue = NULL;
2459 #ifdef CONFIG_NET_PROFILE
2460 net_profile_init();
2461 NET_PROFILE_REGISTER(dev_queue_xmit);
2462 NET_PROFILE_REGISTER(softnet_process);
2463 #endif
2465 * Add the devices.
2466 * If the call to dev->init fails, the dev is removed
2467 * from the chain disconnecting the device until the
2468 * next reboot.
2470 * NB At boot phase networking is dead. No locking is required.
2471 * But we still preserve dev_base_lock for sanity.
2474 dp = &dev_base;
2475 while ((dev = *dp) != NULL) {
2476 spin_lock_init(&dev->queue_lock);
2477 spin_lock_init(&dev->xmit_lock);
2478 #ifdef CONFIG_NET_FASTROUTE
2479 dev->fastpath_lock = RW_LOCK_UNLOCKED;
2480 #endif
2481 dev->xmit_lock_owner = -1;
2482 dev->iflink = -1;
2483 dev_hold(dev);
2486 * Check boot time settings for the device.
2488 if (!netdev_boot_setup_check(dev)) {
2490 * No settings found - allocate name. If the init()
2491 * fails the name will be reissued correctly.
2493 if (strchr(dev->name, '%'))
2494 dev_alloc_name(dev, dev->name);
2497 if (dev->init && dev->init(dev)) {
2499 * It failed to come up. Unhook it.
2501 write_lock_bh(&dev_base_lock);
2502 *dp = dev->next;
2503 dev->deadbeaf = 1;
2504 write_unlock_bh(&dev_base_lock);
2505 dev_put(dev);
2506 } else {
2507 dp = &dev->next;
2508 dev->ifindex = dev_new_index();
2509 if (dev->iflink == -1)
2510 dev->iflink = dev->ifindex;
2511 if (dev->rebuild_header == NULL)
2512 dev->rebuild_header = default_rebuild_header;
2513 dev_init_scheduler(dev);
2514 set_bit(__LINK_STATE_PRESENT, &dev->state);
2518 #ifdef CONFIG_PROC_FS
2519 proc_net_create("dev", 0, dev_get_info);
2520 create_proc_read_entry("net/softnet_stat", 0, 0, dev_proc_stats, NULL);
2521 #ifdef WIRELESS_EXT
2522 proc_net_create("wireless", 0, dev_get_wireless_info);
2523 #endif /* WIRELESS_EXT */
2524 #endif /* CONFIG_PROC_FS */
2526 dev_boot_phase = 0;
2528 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
2529 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
2531 dst_init();
2532 dev_mcast_init();
2535 * Initialise network devices
2538 net_device_init();
2540 return 0;