[linux-2.6.git] net/core/dev.c

/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write, so it would change the data
 *	and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
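
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * a protocol module typically registers its handler roughly like this
 * (my_rcv and my_packet_type are hypothetical names):
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		... process or consume skb ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *	...
 *	dev_remove_pack(&my_packet_type);	(on module unload)
 */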

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves boot-time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
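
/*
 * Editor's note -- illustrative, not part of the original file: the
 * "netdev=" parameter takes up to four integers (irq, base_addr,
 * mem_start, mem_end) followed by the device name, e.g. on the kernel
 * command line:
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * Integers not supplied are left zero in the saved struct ifmap.
 */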

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
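
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the hold/put discipline for the reference-taking lookups:
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */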

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or NULL if it is not found. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or NULL if it is not found. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the device, or NULL if no matching device is found. The device
 *	returned has had a reference added and the pointer is safe until the
 *	user calls dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
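
/*
 * Editor's note -- illustrative, not part of the original file:
 * dev_valid_name("eth0") and dev_valid_name("tun%d") return 1, while
 * "", ".", "..", names containing '/' or whitespace, and names of
 * IFNAMSIZ or more characters all return 0.
 */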

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
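
/*
 * Editor's note -- illustrative, not part of the original file: with
 * eth0 and eth1 already registered in the namespace,
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * writes "eth2" into dev->name and returns 2; a negative errno is
 * returned on failure.
 */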

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; a format string such as "eth%d" can
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
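
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * both dev_open() and dev_close() assume the caller holds the RTNL
 * semaphore:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */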

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
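
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a typical notifier user looks roughly like this (my_netdev_event and
 * my_notifier are hypothetical names; ptr is the struct net_device):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			... react to the interface coming up ...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */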

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
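
/*
 * Editor's note -- illustrative, not part of the original file: a driver
 * completing TX buffers in its interrupt handler frees them with
 * dev_kfree_skb_irq(skb); code that may run in either hard-irq or
 * process context uses dev_kfree_skb_any(skb), which picks the safe
 * variant at run time.
 */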

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping of all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
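
	/* Map the 32-bit hash onto [0, real_num_tx_queues) with a
	 * fixed-point multiply rather than a modulo division.
	 */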
	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index = 0;

	if (dev->select_queue)
		queue_index = dev->select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock; it is not prone to deadlocks.
	   Or take the noqueue qdisc path, which is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest when
	 * the CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
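
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a non-NAPI driver's receive interrupt typically ends with
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * leaving the real protocol work to the NET_RX softirq.
 */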

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 * instructions (a compare and 2 stores) right now if we don't have it
 * on but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
2139 * netif_nit_deliver - deliver received packets to network taps
2140 * @skb: buffer
2142 * This function is used to deliver incoming packets to network
2143 * taps. It should be used when the normal netif_receive_skb path
2144 * is bypassed, for example because of VLAN acceleration.
2146 void netif_nit_deliver(struct sk_buff *skb)
2148 struct packet_type *ptype;
2150 if (list_empty(&ptype_all))
2151 return;
2153 skb_reset_network_header(skb);
2154 skb_reset_transport_header(skb);
2155 skb->mac_len = skb->network_header - skb->mac_header;
2157 rcu_read_lock();
2158 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2159 if (!ptype->dev || ptype->dev == skb->dev)
2160 deliver_skb(skb, ptype, skb->dev);
2162 rcu_read_unlock();
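/*
 * Illustrative sketch, not part of this file: a receive path that
 * bypasses netif_receive_skb(), such as hardware-accelerated VLAN
 * reception, can still make packets visible to AF_PACKET taps
 * ("example_vlan_rx" is a hypothetical name):
 *
 *	int example_vlan_rx(struct sk_buff *skb)
 *	{
 *		netif_nit_deliver(skb);
 *		-- ... then hand skb straight to the VLAN device ...
 *	}
 */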
2166 * netif_receive_skb - process receive buffer from network
2167 * @skb: buffer to process
2169 * netif_receive_skb() is the main receive data processing function.
2170 * It always succeeds. The buffer may be dropped during processing
2171 * for congestion control or by the protocol layers.
2173 * This function may only be called from softirq context and interrupts
2174 * should be enabled.
2176 * Return values (usually ignored):
2177 * NET_RX_SUCCESS: no congestion
2178 * NET_RX_DROP: packet was dropped
2180 int netif_receive_skb(struct sk_buff *skb)
2182 struct packet_type *ptype, *pt_prev;
2183 struct net_device *orig_dev;
2184 struct net_device *null_or_orig;
2185 int ret = NET_RX_DROP;
2186 __be16 type;
2188 /* if we've gotten here through NAPI, check netpoll */
2189 if (netpoll_receive_skb(skb))
2190 return NET_RX_DROP;
2192 if (!skb->tstamp.tv64)
2193 net_timestamp(skb);
2195 if (!skb->iif)
2196 skb->iif = skb->dev->ifindex;
2198 null_or_orig = NULL;
2199 orig_dev = skb->dev;
2200 if (orig_dev->master) {
2201 if (skb_bond_should_drop(skb))
2202 null_or_orig = orig_dev; /* deliver only exact match */
2203 else
2204 skb->dev = orig_dev->master;
2207 __get_cpu_var(netdev_rx_stat).total++;
2209 skb_reset_network_header(skb);
2210 skb_reset_transport_header(skb);
2211 skb->mac_len = skb->network_header - skb->mac_header;
2213 pt_prev = NULL;
2215 rcu_read_lock();
2217 /* Don't receive packets in an exiting network namespace */
2218 if (!net_alive(dev_net(skb->dev)))
2219 goto out;
2221 #ifdef CONFIG_NET_CLS_ACT
2222 if (skb->tc_verd & TC_NCLS) {
2223 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2224 goto ncls;
2226 #endif
2228 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2229 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2230 ptype->dev == orig_dev) {
2231 if (pt_prev)
2232 ret = deliver_skb(skb, pt_prev, orig_dev);
2233 pt_prev = ptype;
2237 #ifdef CONFIG_NET_CLS_ACT
2238 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2239 if (!skb)
2240 goto out;
2241 ncls:
2242 #endif
2244 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2245 if (!skb)
2246 goto out;
2247 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2248 if (!skb)
2249 goto out;
2251 type = skb->protocol;
2252 list_for_each_entry_rcu(ptype,
2253 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2254 if (ptype->type == type &&
2255 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2256 ptype->dev == orig_dev)) {
2257 if (pt_prev)
2258 ret = deliver_skb(skb, pt_prev, orig_dev);
2259 pt_prev = ptype;
2263 if (pt_prev) {
2264 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2265 } else {
2266 kfree_skb(skb);
2267 /* Jamal, now you will not be able to escape explaining
2268 * to me how you were going to use this. :-)
2270 ret = NET_RX_DROP;
2273 out:
2274 rcu_read_unlock();
2275 return ret;
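/*
 * Illustrative sketch, not part of this file: a NAPI driver's ->poll()
 * typically pulls frames off the RX ring, sets skb->protocol with
 * eth_type_trans() and hands each frame to netif_receive_skb()
 * ("example_*" names and the priv layout are hypothetical):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =
 *			container_of(napi, struct example_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget &&
 *		       (skb = example_rx_frame(priv)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		return work;
 *	}
 */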
2278 /* Network device is going away, flush any packets still pending */
2279 static void flush_backlog(void *arg)
2281 struct net_device *dev = arg;
2282 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2283 struct sk_buff *skb, *tmp;
2285 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2286 if (skb->dev == dev) {
2287 __skb_unlink(skb, &queue->input_pkt_queue);
2288 kfree_skb(skb);
2292 static int process_backlog(struct napi_struct *napi, int quota)
2294 int work = 0;
2295 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2296 unsigned long start_time = jiffies;
2298 napi->weight = weight_p;
2299 do {
2300 struct sk_buff *skb;
2302 local_irq_disable();
2303 skb = __skb_dequeue(&queue->input_pkt_queue);
2304 if (!skb) {
2305 __napi_complete(napi);
2306 local_irq_enable();
2307 break;
2309 local_irq_enable();
2311 netif_receive_skb(skb);
2312 } while (++work < quota && jiffies == start_time);
2314 return work;
2318 * __napi_schedule - schedule for receive
2319 * @n: entry to schedule
2321 * The entry's receive function will be scheduled to run.
2323 void __napi_schedule(struct napi_struct *n)
2325 unsigned long flags;
2327 local_irq_save(flags);
2328 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2329 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2330 local_irq_restore(flags);
2332 EXPORT_SYMBOL(__napi_schedule);
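/*
 * Illustrative sketch, not part of this file: drivers normally reach
 * __napi_schedule() via napi_schedule(), or via napi_schedule_prep()
 * when they must mask the device interrupt first, as in this
 * hypothetical RX interrupt handler ("example_*" names are made up):
 *
 *	static irqreturn_t example_interrupt(int irq, void *dev_id)
 *	{
 *		struct example_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			example_mask_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */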
2335 static void net_rx_action(struct softirq_action *h)
2337 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2338 unsigned long start_time = jiffies;
2339 int budget = netdev_budget;
2340 void *have;
2342 local_irq_disable();
2344 while (!list_empty(list)) {
2345 struct napi_struct *n;
2346 int work, weight;
2348 /* If softirq window is exhausted then punt.
2350 * Note that this is a slight policy change from the
2351 * previous NAPI code, which would allow up to 2
2352 * jiffies to pass before breaking out. The test
2353 * used to be "jiffies - start_time > 1".
2355 if (unlikely(budget <= 0 || jiffies != start_time))
2356 goto softnet_break;
2358 local_irq_enable();
2360 /* Even though interrupts have been re-enabled, this
2361 * access is safe because interrupts can only add new
2362 * entries to the tail of this list, and only ->poll()
2363 * calls can remove this head entry from the list.
2365 n = list_entry(list->next, struct napi_struct, poll_list);
2367 have = netpoll_poll_lock(n);
2369 weight = n->weight;
2371 /* This NAPI_STATE_SCHED test is for avoiding a race
2372 * with netpoll's poll_napi(). Only the entity which
2373 * obtains the lock and sees NAPI_STATE_SCHED set will
2374 * actually make the ->poll() call. Therefore we avoid
2375 * accidentally calling ->poll() when NAPI is not scheduled.
2377 work = 0;
2378 if (test_bit(NAPI_STATE_SCHED, &n->state))
2379 work = n->poll(n, weight);
2381 WARN_ON_ONCE(work > weight);
2383 budget -= work;
2385 local_irq_disable();
2387 /* Drivers must not modify the NAPI state if they
2388 * consume the entire weight. In such cases this code
2389 * still "owns" the NAPI instance and therefore can
2390 * move the instance around on the list at will.
2392 if (unlikely(work == weight)) {
2393 if (unlikely(napi_disable_pending(n)))
2394 __napi_complete(n);
2395 else
2396 list_move_tail(&n->poll_list, list);
2399 netpoll_poll_unlock(have);
2401 out:
2402 local_irq_enable();
2404 #ifdef CONFIG_NET_DMA
2406 * There may not be any more sk_buffs coming right now, so push
2407 * any pending DMA copies to hardware
2409 if (!cpus_empty(net_dma.channel_mask)) {
2410 int chan_idx;
2411 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2412 struct dma_chan *chan = net_dma.channels[chan_idx];
2413 if (chan)
2414 dma_async_memcpy_issue_pending(chan);
2417 #endif
2419 return;
2421 softnet_break:
2422 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2423 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2424 goto out;
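/*
 * Illustrative sketch, not part of this file: the budget/weight rules
 * enforced above imply the usual driver-side contract -- complete NAPI
 * only when less than the full budget was consumed, and never touch
 * NAPI state after consuming all of it ("example_*" names are
 * hypothetical):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =
 *			container_of(napi, struct example_priv, napi);
 *		int work = example_clean_rx(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			example_unmask_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */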
2427 static gifconf_func_t *gifconf_list[NPROTO];
2430 * register_gifconf - register a SIOCGIF handler
2431 * @family: Address family
2432 * @gifconf: Function handler
2434 * Register protocol dependent address dumping routines. The handler
2435 * that is passed must not be freed or reused until it has been replaced
2436 * by another handler.
2438 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2440 if (family >= NPROTO)
2441 return -EINVAL;
2442 gifconf_list[family] = gifconf;
2443 return 0;
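/*
 * Illustrative sketch, not part of this file: an address family
 * registers its dump helper once at init time; IPv4, for instance,
 * does roughly this from devinet.c:
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */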
2448 * Map an interface index to its name (SIOCGIFNAME)
2452 * We need this ioctl for efficient implementation of the
2453 * if_indextoname() function required by the IPv6 API. Without
2454 * it, we would have to search all the interfaces to find a
2455 * match. --pb
2458 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2460 struct net_device *dev;
2461 struct ifreq ifr;
2464 * Fetch the caller's info block.
2467 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2468 return -EFAULT;
2470 read_lock(&dev_base_lock);
2471 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2472 if (!dev) {
2473 read_unlock(&dev_base_lock);
2474 return -ENODEV;
2477 strcpy(ifr.ifr_name, dev->name);
2478 read_unlock(&dev_base_lock);
2480 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2481 return -EFAULT;
2482 return 0;
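/*
 * Illustrative sketch, not part of this file: from user space the
 * index-to-name mapping is exercised like this (any datagram socket
 * will do as the ioctl target):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 1;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 1 is %s\n", ifr.ifr_name);
 */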
2486 * Perform a SIOCGIFCONF call. This structure will change
2487 * size eventually, and there is nothing I can do about it.
2488 * Thus we will need a 'compatibility mode'.
2491 static int dev_ifconf(struct net *net, char __user *arg)
2493 struct ifconf ifc;
2494 struct net_device *dev;
2495 char __user *pos;
2496 int len;
2497 int total;
2498 int i;
2501 * Fetch the caller's info block.
2504 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2505 return -EFAULT;
2507 pos = ifc.ifc_buf;
2508 len = ifc.ifc_len;
2511 * Loop over the interfaces, and write an info block for each.
2514 total = 0;
2515 for_each_netdev(net, dev) {
2516 for (i = 0; i < NPROTO; i++) {
2517 if (gifconf_list[i]) {
2518 int done;
2519 if (!pos)
2520 done = gifconf_list[i](dev, NULL, 0);
2521 else
2522 done = gifconf_list[i](dev, pos + total,
2523 len - total);
2524 if (done < 0)
2525 return -EFAULT;
2526 total += done;
2532 * All done. Write the updated control block back to the caller.
2534 ifc.ifc_len = total;
2537 * Both BSD and Solaris return 0 here, so we do too.
2539 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
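/*
 * Illustrative sketch, not part of this file: the classic user-space
 * pattern hands in a buffer and walks the returned ifreq array; a
 * NULL ifc_buf makes the kernel report only the length needed:
 *
 *	char buf[4096];
 *	struct ifconf ifc;
 *	int i, n;
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *		for (i = 0; i < n; i++)
 *			printf("%s\n", ifc.ifc_req[i].ifr_name);
 *	}
 */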
2542 #ifdef CONFIG_PROC_FS
2544 * This is invoked by the /proc filesystem handler to display a device
2545 * in detail.
2547 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2548 __acquires(dev_base_lock)
2550 struct net *net = seq_file_net(seq);
2551 loff_t off;
2552 struct net_device *dev;
2554 read_lock(&dev_base_lock);
2555 if (!*pos)
2556 return SEQ_START_TOKEN;
2558 off = 1;
2559 for_each_netdev(net, dev)
2560 if (off++ == *pos)
2561 return dev;
2563 return NULL;
2566 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2568 struct net *net = seq_file_net(seq);
2569 ++*pos;
2570 return v == SEQ_START_TOKEN ?
2571 first_net_device(net) : next_net_device((struct net_device *)v);
2574 void dev_seq_stop(struct seq_file *seq, void *v)
2575 __releases(dev_base_lock)
2577 read_unlock(&dev_base_lock);
2580 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2582 struct net_device_stats *stats = dev->get_stats(dev);
2584 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2585 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2586 dev->name, stats->rx_bytes, stats->rx_packets,
2587 stats->rx_errors,
2588 stats->rx_dropped + stats->rx_missed_errors,
2589 stats->rx_fifo_errors,
2590 stats->rx_length_errors + stats->rx_over_errors +
2591 stats->rx_crc_errors + stats->rx_frame_errors,
2592 stats->rx_compressed, stats->multicast,
2593 stats->tx_bytes, stats->tx_packets,
2594 stats->tx_errors, stats->tx_dropped,
2595 stats->tx_fifo_errors, stats->collisions,
2596 stats->tx_carrier_errors +
2597 stats->tx_aborted_errors +
2598 stats->tx_window_errors +
2599 stats->tx_heartbeat_errors,
2600 stats->tx_compressed);
2604 * Called from the PROCfs module. This now uses the new arbitrary sized
2605 * /proc/net interface to create /proc/net/dev
2607 static int dev_seq_show(struct seq_file *seq, void *v)
2609 if (v == SEQ_START_TOKEN)
2610 seq_puts(seq, "Inter-| Receive "
2611 " | Transmit\n"
2612 " face |bytes packets errs drop fifo frame "
2613 "compressed multicast|bytes packets errs "
2614 "drop fifo colls carrier compressed\n");
2615 else
2616 dev_seq_printf_stats(seq, v);
2617 return 0;
2620 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2622 struct netif_rx_stats *rc = NULL;
2624 while (*pos < nr_cpu_ids)
2625 if (cpu_online(*pos)) {
2626 rc = &per_cpu(netdev_rx_stat, *pos);
2627 break;
2628 } else
2629 ++*pos;
2630 return rc;
2633 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2635 return softnet_get_online(pos);
2638 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2640 ++*pos;
2641 return softnet_get_online(pos);
2644 static void softnet_seq_stop(struct seq_file *seq, void *v)
2648 static int softnet_seq_show(struct seq_file *seq, void *v)
2650 struct netif_rx_stats *s = v;
2652 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2653 s->total, s->dropped, s->time_squeeze, 0,
2654 0, 0, 0, 0, /* was fastroute */
2655 s->cpu_collision);
2656 return 0;
2659 static const struct seq_operations dev_seq_ops = {
2660 .start = dev_seq_start,
2661 .next = dev_seq_next,
2662 .stop = dev_seq_stop,
2663 .show = dev_seq_show,
2666 static int dev_seq_open(struct inode *inode, struct file *file)
2668 return seq_open_net(inode, file, &dev_seq_ops,
2669 sizeof(struct seq_net_private));
2672 static const struct file_operations dev_seq_fops = {
2673 .owner = THIS_MODULE,
2674 .open = dev_seq_open,
2675 .read = seq_read,
2676 .llseek = seq_lseek,
2677 .release = seq_release_net,
2680 static const struct seq_operations softnet_seq_ops = {
2681 .start = softnet_seq_start,
2682 .next = softnet_seq_next,
2683 .stop = softnet_seq_stop,
2684 .show = softnet_seq_show,
2687 static int softnet_seq_open(struct inode *inode, struct file *file)
2689 return seq_open(file, &softnet_seq_ops);
2692 static const struct file_operations softnet_seq_fops = {
2693 .owner = THIS_MODULE,
2694 .open = softnet_seq_open,
2695 .read = seq_read,
2696 .llseek = seq_lseek,
2697 .release = seq_release,
2700 static void *ptype_get_idx(loff_t pos)
2702 struct packet_type *pt = NULL;
2703 loff_t i = 0;
2704 int t;
2706 list_for_each_entry_rcu(pt, &ptype_all, list) {
2707 if (i == pos)
2708 return pt;
2709 ++i;
2712 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2713 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2714 if (i == pos)
2715 return pt;
2716 ++i;
2719 return NULL;
2722 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2723 __acquires(RCU)
2725 rcu_read_lock();
2726 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2729 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2731 struct packet_type *pt;
2732 struct list_head *nxt;
2733 int hash;
2735 ++*pos;
2736 if (v == SEQ_START_TOKEN)
2737 return ptype_get_idx(0);
2739 pt = v;
2740 nxt = pt->list.next;
2741 if (pt->type == htons(ETH_P_ALL)) {
2742 if (nxt != &ptype_all)
2743 goto found;
2744 hash = 0;
2745 nxt = ptype_base[0].next;
2746 } else
2747 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2749 while (nxt == &ptype_base[hash]) {
2750 if (++hash >= PTYPE_HASH_SIZE)
2751 return NULL;
2752 nxt = ptype_base[hash].next;
2754 found:
2755 return list_entry(nxt, struct packet_type, list);
2758 static void ptype_seq_stop(struct seq_file *seq, void *v)
2759 __releases(RCU)
2761 rcu_read_unlock();
2764 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2766 #ifdef CONFIG_KALLSYMS
2767 unsigned long offset = 0, symsize;
2768 const char *symname;
2769 char *modname;
2770 char namebuf[128];
2772 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2773 &modname, namebuf);
2775 if (symname) {
2776 char *delim = ":";
2778 if (!modname)
2779 modname = delim = "";
2780 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2781 symname, offset);
2782 return;
2784 #endif
2786 seq_printf(seq, "[%p]", sym);
2789 static int ptype_seq_show(struct seq_file *seq, void *v)
2791 struct packet_type *pt = v;
2793 if (v == SEQ_START_TOKEN)
2794 seq_puts(seq, "Type Device Function\n");
2795 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2796 if (pt->type == htons(ETH_P_ALL))
2797 seq_puts(seq, "ALL ");
2798 else
2799 seq_printf(seq, "%04x", ntohs(pt->type));
2801 seq_printf(seq, " %-8s ",
2802 pt->dev ? pt->dev->name : "");
2803 ptype_seq_decode(seq, pt->func);
2804 seq_putc(seq, '\n');
2807 return 0;
2810 static const struct seq_operations ptype_seq_ops = {
2811 .start = ptype_seq_start,
2812 .next = ptype_seq_next,
2813 .stop = ptype_seq_stop,
2814 .show = ptype_seq_show,
2817 static int ptype_seq_open(struct inode *inode, struct file *file)
2819 return seq_open_net(inode, file, &ptype_seq_ops,
2820 sizeof(struct seq_net_private));
2823 static const struct file_operations ptype_seq_fops = {
2824 .owner = THIS_MODULE,
2825 .open = ptype_seq_open,
2826 .read = seq_read,
2827 .llseek = seq_lseek,
2828 .release = seq_release_net,
2832 static int __net_init dev_proc_net_init(struct net *net)
2834 int rc = -ENOMEM;
2836 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2837 goto out;
2838 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2839 goto out_dev;
2840 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2841 goto out_softnet;
2843 if (wext_proc_init(net))
2844 goto out_ptype;
2845 rc = 0;
2846 out:
2847 return rc;
2848 out_ptype:
2849 proc_net_remove(net, "ptype");
2850 out_softnet:
2851 proc_net_remove(net, "softnet_stat");
2852 out_dev:
2853 proc_net_remove(net, "dev");
2854 goto out;
2857 static void __net_exit dev_proc_net_exit(struct net *net)
2859 wext_proc_exit(net);
2861 proc_net_remove(net, "ptype");
2862 proc_net_remove(net, "softnet_stat");
2863 proc_net_remove(net, "dev");
2866 static struct pernet_operations __net_initdata dev_proc_ops = {
2867 .init = dev_proc_net_init,
2868 .exit = dev_proc_net_exit,
2871 static int __init dev_proc_init(void)
2873 return register_pernet_subsys(&dev_proc_ops);
2875 #else
2876 #define dev_proc_init() 0
2877 #endif /* CONFIG_PROC_FS */
2881 * netdev_set_master - set up master/slave pair
2882 * @slave: slave device
2883 * @master: new master device
2885 * Changes the master device of the slave. Pass %NULL to break the
2886 * bonding. The caller must hold the RTNL semaphore. On a failure
2887 * a negative errno code is returned. On success the reference counts
2888 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2889 * function returns zero.
2891 int netdev_set_master(struct net_device *slave, struct net_device *master)
2893 struct net_device *old = slave->master;
2895 ASSERT_RTNL();
2897 if (master) {
2898 if (old)
2899 return -EBUSY;
2900 dev_hold(master);
2903 slave->master = master;
2905 synchronize_net();
2907 if (old)
2908 dev_put(old);
2910 if (master)
2911 slave->flags |= IFF_SLAVE;
2912 else
2913 slave->flags &= ~IFF_SLAVE;
2915 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2916 return 0;
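/*
 * Illustrative sketch, not part of this file: the bonding driver is
 * the canonical caller -- with the RTNL held it attaches and detaches
 * slaves roughly like this:
 *
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	if (err)
 *		goto unwind;
 *	...
 *	netdev_set_master(slave_dev, NULL);
 */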
2919 static int __dev_set_promiscuity(struct net_device *dev, int inc)
2921 unsigned short old_flags = dev->flags;
2923 ASSERT_RTNL();
2925 dev->flags |= IFF_PROMISC;
2926 dev->promiscuity += inc;
2927 if (dev->promiscuity == 0) {
2929 * Avoid overflow.
2930 * If inc causes an overflow, leave promisc untouched and return an error.
2932 if (inc < 0)
2933 dev->flags &= ~IFF_PROMISC;
2934 else {
2935 dev->promiscuity -= inc;
2936 printk(KERN_WARNING "%s: promiscuity touches roof, "
2937 "set promiscuity failed, promiscuity feature "
2938 "of device might be broken.\n", dev->name);
2939 return -EOVERFLOW;
2942 if (dev->flags != old_flags) {
2943 printk(KERN_INFO "device %s %s promiscuous mode\n",
2944 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2945 "left");
2946 if (audit_enabled)
2947 audit_log(current->audit_context, GFP_ATOMIC,
2948 AUDIT_ANOM_PROMISCUOUS,
2949 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2950 dev->name, (dev->flags & IFF_PROMISC),
2951 (old_flags & IFF_PROMISC),
2952 audit_get_loginuid(current),
2953 current->uid, current->gid,
2954 audit_get_sessionid(current));
2956 if (dev->change_rx_flags)
2957 dev->change_rx_flags(dev, IFF_PROMISC);
2959 return 0;
2963 * dev_set_promiscuity - update promiscuity count on a device
2964 * @dev: device
2965 * @inc: modifier
2967 * Add or remove promiscuity from a device. While the count in the device
2968 * remains above zero the interface remains promiscuous. Once it hits zero
2969 * the device reverts back to normal filtering operation. A negative inc
2970 * value is used to drop promiscuity on the device.
2971 * Return 0 if successful or a negative errno code on error.
2973 int dev_set_promiscuity(struct net_device *dev, int inc)
2975 unsigned short old_flags = dev->flags;
2976 int err;
2978 err = __dev_set_promiscuity(dev, inc);
2979 if (err < 0)
2980 return err;
2981 if (dev->flags != old_flags)
2982 dev_set_rx_mode(dev);
2983 return err;
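/*
 * Illustrative sketch, not part of this file: a subsystem that needs
 * to see all traffic (a packet tap, say) bumps the count on entry and
 * drops it symmetrically on exit, holding the RTNL both times:
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */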
2987 * dev_set_allmulti - update allmulti count on a device
2988 * @dev: device
2989 * @inc: modifier
2991 * Add or remove reception of all multicast frames to a device. While the
2992 * count in the device remains above zero the interface remains listening
2993 * to all interfaces. Once it hits zero the device reverts back to normal
2994 * filtering operation. A negative @inc value is used to drop the counter
2995 * when releasing a resource needing all multicasts.
2996 * Return 0 if successful or a negative errno code on error.
2999 int dev_set_allmulti(struct net_device *dev, int inc)
3001 unsigned short old_flags = dev->flags;
3003 ASSERT_RTNL();
3005 dev->flags |= IFF_ALLMULTI;
3006 dev->allmulti += inc;
3007 if (dev->allmulti == 0) {
3009 * Avoid overflow.
3010 * If inc causes an overflow, leave allmulti untouched and return an error.
3012 if (inc < 0)
3013 dev->flags &= ~IFF_ALLMULTI;
3014 else {
3015 dev->allmulti -= inc;
3016 printk(KERN_WARNING "%s: allmulti touches roof, "
3017 "set allmulti failed, allmulti feature of "
3018 "device might be broken.\n", dev->name);
3019 return -EOVERFLOW;
3022 if (dev->flags ^ old_flags) {
3023 if (dev->change_rx_flags)
3024 dev->change_rx_flags(dev, IFF_ALLMULTI);
3025 dev_set_rx_mode(dev);
3027 return 0;
3031 * Upload unicast and multicast address lists to device and
3032 * configure RX filtering. When the device doesn't support unicast
3033 * filtering it is put in promiscuous mode while unicast addresses
3034 * are present.
3036 void __dev_set_rx_mode(struct net_device *dev)
3038 /* dev_open will call this function so the list will stay sane. */
3039 if (!(dev->flags&IFF_UP))
3040 return;
3042 if (!netif_device_present(dev))
3043 return;
3045 if (dev->set_rx_mode)
3046 dev->set_rx_mode(dev);
3047 else {
3048 /* Unicast address changes may only happen under the rtnl,
3049 * therefore calling __dev_set_promiscuity here is safe.
3051 if (dev->uc_count > 0 && !dev->uc_promisc) {
3052 __dev_set_promiscuity(dev, 1);
3053 dev->uc_promisc = 1;
3054 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3055 __dev_set_promiscuity(dev, -1);
3056 dev->uc_promisc = 0;
3059 if (dev->set_multicast_list)
3060 dev->set_multicast_list(dev);
3064 void dev_set_rx_mode(struct net_device *dev)
3066 netif_addr_lock_bh(dev);
3067 __dev_set_rx_mode(dev);
3068 netif_addr_unlock_bh(dev);
3071 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3072 void *addr, int alen, int glbl)
3074 struct dev_addr_list *da;
3076 for (; (da = *list) != NULL; list = &da->next) {
3077 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3078 alen == da->da_addrlen) {
3079 if (glbl) {
3080 int old_glbl = da->da_gusers;
3081 da->da_gusers = 0;
3082 if (old_glbl == 0)
3083 break;
3085 if (--da->da_users)
3086 return 0;
3088 *list = da->next;
3089 kfree(da);
3090 (*count)--;
3091 return 0;
3094 return -ENOENT;
3097 int __dev_addr_add(struct dev_addr_list **list, int *count,
3098 void *addr, int alen, int glbl)
3100 struct dev_addr_list *da;
3102 for (da = *list; da != NULL; da = da->next) {
3103 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3104 da->da_addrlen == alen) {
3105 if (glbl) {
3106 int old_glbl = da->da_gusers;
3107 da->da_gusers = 1;
3108 if (old_glbl)
3109 return 0;
3111 da->da_users++;
3112 return 0;
3116 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3117 if (da == NULL)
3118 return -ENOMEM;
3119 memcpy(da->da_addr, addr, alen);
3120 da->da_addrlen = alen;
3121 da->da_users = 1;
3122 da->da_gusers = glbl ? 1 : 0;
3123 da->next = *list;
3124 *list = da;
3125 (*count)++;
3126 return 0;
3130 * dev_unicast_delete - Release secondary unicast address.
3131 * @dev: device
3132 * @addr: address to delete
3133 * @alen: length of @addr
3135 * Release reference to a secondary unicast address and remove it
3136 * from the device if the reference count drops to zero.
3138 * The caller must hold the rtnl_mutex.
3140 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3142 int err;
3144 ASSERT_RTNL();
3146 netif_addr_lock_bh(dev);
3147 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3148 if (!err)
3149 __dev_set_rx_mode(dev);
3150 netif_addr_unlock_bh(dev);
3151 return err;
3153 EXPORT_SYMBOL(dev_unicast_delete);
3156 * dev_unicast_add - add a secondary unicast address
3157 * @dev: device
3158 * @addr: address to add
3159 * @alen: length of @addr
3161 * Add a secondary unicast address to the device or increase
3162 * the reference count if it already exists.
3164 * The caller must hold the rtnl_mutex.
3166 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3168 int err;
3170 ASSERT_RTNL();
3172 netif_addr_lock_bh(dev);
3173 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3174 if (!err)
3175 __dev_set_rx_mode(dev);
3176 netif_addr_unlock_bh(dev);
3177 return err;
3179 EXPORT_SYMBOL(dev_unicast_add);
3181 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3182 struct dev_addr_list **from, int *from_count)
3184 struct dev_addr_list *da, *next;
3185 int err = 0;
3187 da = *from;
3188 while (da != NULL) {
3189 next = da->next;
3190 if (!da->da_synced) {
3191 err = __dev_addr_add(to, to_count,
3192 da->da_addr, da->da_addrlen, 0);
3193 if (err < 0)
3194 break;
3195 da->da_synced = 1;
3196 da->da_users++;
3197 } else if (da->da_users == 1) {
3198 __dev_addr_delete(to, to_count,
3199 da->da_addr, da->da_addrlen, 0);
3200 __dev_addr_delete(from, from_count,
3201 da->da_addr, da->da_addrlen, 0);
3203 da = next;
3205 return err;
3208 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3209 struct dev_addr_list **from, int *from_count)
3211 struct dev_addr_list *da, *next;
3213 da = *from;
3214 while (da != NULL) {
3215 next = da->next;
3216 if (da->da_synced) {
3217 __dev_addr_delete(to, to_count,
3218 da->da_addr, da->da_addrlen, 0);
3219 da->da_synced = 0;
3220 __dev_addr_delete(from, from_count,
3221 da->da_addr, da->da_addrlen, 0);
3223 da = next;
3228 * dev_unicast_sync - Synchronize device's unicast list to another device
3229 * @to: destination device
3230 * @from: source device
3232 * Add newly added addresses to the destination device and release
3233 * addresses that have no users left. The source device must be
3234 * locked by netif_addr_lock_bh.
3236 * This function is intended to be called from the dev->set_rx_mode
3237 * function of layered software devices.
3239 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3241 int err = 0;
3243 netif_addr_lock_bh(to);
3244 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3245 &from->uc_list, &from->uc_count);
3246 if (!err)
3247 __dev_set_rx_mode(to);
3248 netif_addr_unlock_bh(to);
3249 return err;
3251 EXPORT_SYMBOL(dev_unicast_sync);
3254 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3255 * @to: destination device
3256 * @from: source device
3258 * Remove all addresses that were added to the destination device by
3259 * dev_unicast_sync(). This function is intended to be called from the
3260 * dev->stop function of layered software devices.
3262 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3264 netif_addr_lock_bh(from);
3265 netif_addr_lock(to);
3267 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3268 &from->uc_list, &from->uc_count);
3269 __dev_set_rx_mode(to);
3271 netif_addr_unlock(to);
3272 netif_addr_unlock_bh(from);
3274 EXPORT_SYMBOL(dev_unicast_unsync);
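/*
 * Illustrative sketch, not part of this file: a stacked device such
 * as a VLAN pushes its secondary unicast addresses down to the real
 * device from ->set_rx_mode and removes them again in ->stop
 * ("example_priv" and its "lower" pointer are hypothetical):
 *
 *	static void example_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *lower = example_priv(dev)->lower;
 *
 *		dev_unicast_sync(lower, dev);
 *	}
 *
 *	static int example_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(example_priv(dev)->lower, dev);
 *		return 0;
 *	}
 */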
3276 static void __dev_addr_discard(struct dev_addr_list **list)
3278 struct dev_addr_list *tmp;
3280 while (*list != NULL) {
3281 tmp = *list;
3282 *list = tmp->next;
3283 if (tmp->da_users > tmp->da_gusers)
3284 printk("__dev_addr_discard: address leakage! "
3285 "da_users=%d\n", tmp->da_users);
3286 kfree(tmp);
3290 static void dev_addr_discard(struct net_device *dev)
3292 netif_addr_lock_bh(dev);
3294 __dev_addr_discard(&dev->uc_list);
3295 dev->uc_count = 0;
3297 __dev_addr_discard(&dev->mc_list);
3298 dev->mc_count = 0;
3300 netif_addr_unlock_bh(dev);
3303 unsigned dev_get_flags(const struct net_device *dev)
3305 unsigned flags;
3307 flags = (dev->flags & ~(IFF_PROMISC |
3308 IFF_ALLMULTI |
3309 IFF_RUNNING |
3310 IFF_LOWER_UP |
3311 IFF_DORMANT)) |
3312 (dev->gflags & (IFF_PROMISC |
3313 IFF_ALLMULTI));
3315 if (netif_running(dev)) {
3316 if (netif_oper_up(dev))
3317 flags |= IFF_RUNNING;
3318 if (netif_carrier_ok(dev))
3319 flags |= IFF_LOWER_UP;
3320 if (netif_dormant(dev))
3321 flags |= IFF_DORMANT;
3324 return flags;
3327 int dev_change_flags(struct net_device *dev, unsigned flags)
3329 int ret, changes;
3330 int old_flags = dev->flags;
3332 ASSERT_RTNL();
3335 * Set the flags on our device.
3338 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3339 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3340 IFF_AUTOMEDIA)) |
3341 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3342 IFF_ALLMULTI));
3345 * Load in the correct multicast list now the flags have changed.
3348 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3349 dev->change_rx_flags(dev, IFF_MULTICAST);
3351 dev_set_rx_mode(dev);
3354 * Have we downed the interface? We handle IFF_UP ourselves
3355 * according to user attempts to set it, rather than blindly
3356 * setting it.
3359 ret = 0;
3360 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3361 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3363 if (!ret)
3364 dev_set_rx_mode(dev);
3367 if (dev->flags & IFF_UP &&
3368 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3369 IFF_VOLATILE)))
3370 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3372 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3373 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3374 dev->gflags ^= IFF_PROMISC;
3375 dev_set_promiscuity(dev, inc);
3378 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3379 is important. Some (broken) drivers set IFF_PROMISC when
3380 IFF_ALLMULTI is requested, without asking us and without reporting it.
3382 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3383 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3384 dev->gflags ^= IFF_ALLMULTI;
3385 dev_set_allmulti(dev, inc);
3388 /* Exclude state transition flags, already notified */
3389 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3390 if (changes)
3391 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3393 return ret;
3396 int dev_set_mtu(struct net_device *dev, int new_mtu)
3398 int err;
3400 if (new_mtu == dev->mtu)
3401 return 0;
3403 /* MTU must be positive. */
3404 if (new_mtu < 0)
3405 return -EINVAL;
3407 if (!netif_device_present(dev))
3408 return -ENODEV;
3410 err = 0;
3411 if (dev->change_mtu)
3412 err = dev->change_mtu(dev, new_mtu);
3413 else
3414 dev->mtu = new_mtu;
3415 if (!err && dev->flags & IFF_UP)
3416 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3417 return err;
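/*
 * Illustrative sketch, not part of this file: from user space the MTU
 * is changed with the SIOCSIFMTU ioctl, which reaches this function
 * through dev_ifsioc():
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_mtu = 1400;
 *	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
 *		perror("SIOCSIFMTU");
 */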
3420 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3422 int err;
3424 if (!dev->set_mac_address)
3425 return -EOPNOTSUPP;
3426 if (sa->sa_family != dev->type)
3427 return -EINVAL;
3428 if (!netif_device_present(dev))
3429 return -ENODEV;
3430 err = dev->set_mac_address(dev, sa);
3431 if (!err)
3432 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3433 return err;
3437 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3439 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3441 int err;
3442 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3444 if (!dev)
3445 return -ENODEV;
3447 switch (cmd) {
3448 case SIOCGIFFLAGS: /* Get interface flags */
3449 ifr->ifr_flags = dev_get_flags(dev);
3450 return 0;
3452 case SIOCGIFMETRIC: /* Get the metric on the interface
3453 (currently unused) */
3454 ifr->ifr_metric = 0;
3455 return 0;
3457 case SIOCGIFMTU: /* Get the MTU of a device */
3458 ifr->ifr_mtu = dev->mtu;
3459 return 0;
3461 case SIOCGIFHWADDR:
3462 if (!dev->addr_len)
3463 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3464 else
3465 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3466 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3467 ifr->ifr_hwaddr.sa_family = dev->type;
3468 return 0;
3470 case SIOCGIFSLAVE:
3471 err = -EINVAL;
3472 break;
3474 case SIOCGIFMAP:
3475 ifr->ifr_map.mem_start = dev->mem_start;
3476 ifr->ifr_map.mem_end = dev->mem_end;
3477 ifr->ifr_map.base_addr = dev->base_addr;
3478 ifr->ifr_map.irq = dev->irq;
3479 ifr->ifr_map.dma = dev->dma;
3480 ifr->ifr_map.port = dev->if_port;
3481 return 0;
3483 case SIOCGIFINDEX:
3484 ifr->ifr_ifindex = dev->ifindex;
3485 return 0;
3487 case SIOCGIFTXQLEN:
3488 ifr->ifr_qlen = dev->tx_queue_len;
3489 return 0;
3491 default:
3492 /* dev_ioctl() should ensure this case
3493 * is never reached
3495 WARN_ON(1);
3496 err = -EINVAL;
3497 break;
3500 return err;
3504 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3506 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3508 int err;
3509 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3511 if (!dev)
3512 return -ENODEV;
3514 switch (cmd) {
3515 case SIOCSIFFLAGS: /* Set interface flags */
3516 return dev_change_flags(dev, ifr->ifr_flags);
3518 case SIOCSIFMETRIC: /* Set the metric on the interface
3519 (currently unused) */
3520 return -EOPNOTSUPP;
3522 case SIOCSIFMTU: /* Set the MTU of a device */
3523 return dev_set_mtu(dev, ifr->ifr_mtu);
3525 case SIOCSIFHWADDR:
3526 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3528 case SIOCSIFHWBROADCAST:
3529 if (ifr->ifr_hwaddr.sa_family != dev->type)
3530 return -EINVAL;
3531 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3532 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3533 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3534 return 0;
3536 case SIOCSIFMAP:
3537 if (dev->set_config) {
3538 if (!netif_device_present(dev))
3539 return -ENODEV;
3540 return dev->set_config(dev, &ifr->ifr_map);
3542 return -EOPNOTSUPP;
3544 case SIOCADDMULTI:
3545 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3546 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3547 return -EINVAL;
3548 if (!netif_device_present(dev))
3549 return -ENODEV;
3550 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3551 dev->addr_len, 1);
3553 case SIOCDELMULTI:
3554 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3555 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3556 return -EINVAL;
3557 if (!netif_device_present(dev))
3558 return -ENODEV;
3559 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3560 dev->addr_len, 1);
3562 case SIOCSIFTXQLEN:
3563 if (ifr->ifr_qlen < 0)
3564 return -EINVAL;
3565 dev->tx_queue_len = ifr->ifr_qlen;
3566 return 0;
3568 case SIOCSIFNAME:
3569 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3570 return dev_change_name(dev, ifr->ifr_newname);
3573 * Unknown or private ioctl
3576 default:
3577 if ((cmd >= SIOCDEVPRIVATE &&
3578 cmd <= SIOCDEVPRIVATE + 15) ||
3579 cmd == SIOCBONDENSLAVE ||
3580 cmd == SIOCBONDRELEASE ||
3581 cmd == SIOCBONDSETHWADDR ||
3582 cmd == SIOCBONDSLAVEINFOQUERY ||
3583 cmd == SIOCBONDINFOQUERY ||
3584 cmd == SIOCBONDCHANGEACTIVE ||
3585 cmd == SIOCGMIIPHY ||
3586 cmd == SIOCGMIIREG ||
3587 cmd == SIOCSMIIREG ||
3588 cmd == SIOCBRADDIF ||
3589 cmd == SIOCBRDELIF ||
3590 cmd == SIOCWANDEV) {
3591 err = -EOPNOTSUPP;
3592 if (dev->do_ioctl) {
3593 if (netif_device_present(dev))
3594 err = dev->do_ioctl(dev, ifr,
3595 cmd);
3596 else
3597 err = -ENODEV;
3599 } else
3600 err = -EINVAL;
3603 return err;
3607 * This function handles all "interface"-type I/O control requests. The actual
3608 * 'doing' part of this is dev_ifsioc above.
3612 * dev_ioctl - network device ioctl
3613 * @net: the applicable net namespace
3614 * @cmd: command to issue
3615 * @arg: pointer to a struct ifreq in user space
3617 * Issue ioctl functions to devices. This is normally called by the
3618 * user space syscall interfaces but can sometimes be useful for
3619 * other purposes. The return value is the return from the syscall if
3620 * positive or a negative errno code on error.
3623 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3625 struct ifreq ifr;
3626 int ret;
3627 char *colon;
3629 /* One special case: SIOCGIFCONF takes an ifconf argument
3630 and requires a shared lock, because it sleeps writing
3631 to user space.
3634 if (cmd == SIOCGIFCONF) {
3635 rtnl_lock();
3636 ret = dev_ifconf(net, (char __user *) arg);
3637 rtnl_unlock();
3638 return ret;
3640 if (cmd == SIOCGIFNAME)
3641 return dev_ifname(net, (struct ifreq __user *)arg);
3643 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3644 return -EFAULT;
3646 ifr.ifr_name[IFNAMSIZ-1] = 0;
3648 colon = strchr(ifr.ifr_name, ':');
3649 if (colon)
3650 *colon = 0;
3653 * See which interface the caller is talking about.
3656 switch (cmd) {
3658 * These ioctl calls:
3659 * - can be done by all.
3660 * - atomic and do not require locking.
3661 * - return a value
3663 case SIOCGIFFLAGS:
3664 case SIOCGIFMETRIC:
3665 case SIOCGIFMTU:
3666 case SIOCGIFHWADDR:
3667 case SIOCGIFSLAVE:
3668 case SIOCGIFMAP:
3669 case SIOCGIFINDEX:
3670 case SIOCGIFTXQLEN:
3671 dev_load(net, ifr.ifr_name);
3672 read_lock(&dev_base_lock);
3673 ret = dev_ifsioc_locked(net, &ifr, cmd);
3674 read_unlock(&dev_base_lock);
3675 if (!ret) {
3676 if (colon)
3677 *colon = ':';
3678 if (copy_to_user(arg, &ifr,
3679 sizeof(struct ifreq)))
3680 ret = -EFAULT;
3682 return ret;
3684 case SIOCETHTOOL:
3685 dev_load(net, ifr.ifr_name);
3686 rtnl_lock();
3687 ret = dev_ethtool(net, &ifr);
3688 rtnl_unlock();
3689 if (!ret) {
3690 if (colon)
3691 *colon = ':';
3692 if (copy_to_user(arg, &ifr,
3693 sizeof(struct ifreq)))
3694 ret = -EFAULT;
3696 return ret;
3699 * These ioctl calls:
3700 * - require superuser power.
3701 * - require strict serialization.
3702 * - return a value
3704 case SIOCGMIIPHY:
3705 case SIOCGMIIREG:
3706 case SIOCSIFNAME:
3707 if (!capable(CAP_NET_ADMIN))
3708 return -EPERM;
3709 dev_load(net, ifr.ifr_name);
3710 rtnl_lock();
3711 ret = dev_ifsioc(net, &ifr, cmd);
3712 rtnl_unlock();
3713 if (!ret) {
3714 if (colon)
3715 *colon = ':';
3716 if (copy_to_user(arg, &ifr,
3717 sizeof(struct ifreq)))
3718 ret = -EFAULT;
3720 return ret;
3723 * These ioctl calls:
3724 * - require superuser power.
3725 * - require strict serialization.
3726 * - do not return a value
3728 case SIOCSIFFLAGS:
3729 case SIOCSIFMETRIC:
3730 case SIOCSIFMTU:
3731 case SIOCSIFMAP:
3732 case SIOCSIFHWADDR:
3733 case SIOCSIFSLAVE:
3734 case SIOCADDMULTI:
3735 case SIOCDELMULTI:
3736 case SIOCSIFHWBROADCAST:
3737 case SIOCSIFTXQLEN:
3738 case SIOCSMIIREG:
3739 case SIOCBONDENSLAVE:
3740 case SIOCBONDRELEASE:
3741 case SIOCBONDSETHWADDR:
3742 case SIOCBONDCHANGEACTIVE:
3743 case SIOCBRADDIF:
3744 case SIOCBRDELIF:
3745 if (!capable(CAP_NET_ADMIN))
3746 return -EPERM;
3747 /* fall through */
3748 case SIOCBONDSLAVEINFOQUERY:
3749 case SIOCBONDINFOQUERY:
3750 dev_load(net, ifr.ifr_name);
3751 rtnl_lock();
3752 ret = dev_ifsioc(net, &ifr, cmd);
3753 rtnl_unlock();
3754 return ret;
3756 case SIOCGIFMEM:
3757 /* Get the per device memory space. We can add this but
3758 * currently do not support it */
3759 case SIOCSIFMEM:
3760 /* Set the per device memory buffer space.
3761 * Not applicable in our case */
3762 case SIOCSIFLINK:
3763 return -EINVAL;
3766 * Unknown or private ioctl.
3768 default:
3769 if (cmd == SIOCWANDEV ||
3770 (cmd >= SIOCDEVPRIVATE &&
3771 cmd <= SIOCDEVPRIVATE + 15)) {
3772 dev_load(net, ifr.ifr_name);
3773 rtnl_lock();
3774 ret = dev_ifsioc(net, &ifr, cmd);
3775 rtnl_unlock();
3776 if (!ret && copy_to_user(arg, &ifr,
3777 sizeof(struct ifreq)))
3778 ret = -EFAULT;
3779 return ret;
3781 /* Take care of Wireless Extensions */
3782 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3783 return wext_handle_ioctl(net, &ifr, cmd, arg);
3784 return -EINVAL;
3790 * dev_new_index - allocate an ifindex
3791 * @net: the applicable net namespace
3793 * Returns a suitable unique value for a new device interface
3794 * number. The caller must hold the rtnl semaphore or the
3795 * dev_base_lock to be sure it remains unique.
3797 static int dev_new_index(struct net *net)
3799 static int ifindex;
3800 for (;;) {
3801 if (++ifindex <= 0)
3802 ifindex = 1;
3803 if (!__dev_get_by_index(net, ifindex))
3804 return ifindex;
3808 /* Delayed registration/unregistration */
3809 static DEFINE_SPINLOCK(net_todo_list_lock);
3810 static LIST_HEAD(net_todo_list);
3812 static void net_set_todo(struct net_device *dev)
3814 spin_lock(&net_todo_list_lock);
3815 list_add_tail(&dev->todo_list, &net_todo_list);
3816 spin_unlock(&net_todo_list_lock);
3819 static void rollback_registered(struct net_device *dev)
3821 BUG_ON(dev_boot_phase);
3822 ASSERT_RTNL();
3824 /* Some devices call this without ever having registered, to unwind initialization. */
3825 if (dev->reg_state == NETREG_UNINITIALIZED) {
3826 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3827 "was registered\n", dev->name, dev);
3829 WARN_ON(1);
3830 return;
3833 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3835 /* If device is running, close it first. */
3836 dev_close(dev);
3838 /* And unlink it from device chain. */
3839 unlist_netdevice(dev);
3841 dev->reg_state = NETREG_UNREGISTERING;
3843 synchronize_net();
3845 /* Shutdown queueing discipline. */
3846 dev_shutdown(dev);
3849 /* Notify protocols that we are about to destroy
3850 this device. They should clean all the things.
3852 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3855 * Flush the unicast and multicast chains
3857 dev_addr_discard(dev);
3859 if (dev->uninit)
3860 dev->uninit(dev);
3862 /* Notifier chain MUST detach us from master device. */
3863 WARN_ON(dev->master);
3865 /* Remove entries from kobject tree */
3866 netdev_unregister_kobject(dev);
3868 synchronize_net();
3870 dev_put(dev);
3873 static void __netdev_init_queue_locks_one(struct net_device *dev,
3874 struct netdev_queue *dev_queue,
3875 void *_unused)
3877 spin_lock_init(&dev_queue->_xmit_lock);
3878 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3879 dev_queue->xmit_lock_owner = -1;
3882 static void netdev_init_queue_locks(struct net_device *dev)
3884 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3885 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3889 * register_netdevice - register a network device
3890 * @dev: device to register
3892 * Take a completed network device structure and add it to the kernel
3893 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3894 * chain. 0 is returned on success. A negative errno code is returned
3895 * on a failure to set up the device, or if the name is a duplicate.
3897 * Callers must hold the rtnl semaphore. You may want
3898 * register_netdev() instead of this.
3900 * BUGS:
3901 * The locking appears insufficient to guarantee two parallel registers
3902 * will not get the same name.
3905 int register_netdevice(struct net_device *dev)
3907 struct hlist_head *head;
3908 struct hlist_node *p;
3909 int ret;
3910 struct net *net;
3912 BUG_ON(dev_boot_phase);
3913 ASSERT_RTNL();
3915 might_sleep();
3917 /* When net_devices are persistent, this will be fatal. */
3918 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3919 BUG_ON(!dev_net(dev));
3920 net = dev_net(dev);
3922 spin_lock_init(&dev->addr_list_lock);
3923 netdev_set_addr_lockdep_class(dev);
3924 netdev_init_queue_locks(dev);
3926 dev->iflink = -1;
3928 /* Init, if this function is available */
3929 if (dev->init) {
3930 ret = dev->init(dev);
3931 if (ret) {
3932 if (ret > 0)
3933 ret = -EIO;
3934 goto out;
3938 if (!dev_valid_name(dev->name)) {
3939 ret = -EINVAL;
3940 goto err_uninit;
3943 dev->ifindex = dev_new_index(net);
3944 if (dev->iflink == -1)
3945 dev->iflink = dev->ifindex;
3947 /* Check for existence of name */
3948 head = dev_name_hash(net, dev->name);
3949 hlist_for_each(p, head) {
3950 struct net_device *d
3951 = hlist_entry(p, struct net_device, name_hlist);
3952 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3953 ret = -EEXIST;
3954 goto err_uninit;
3958 /* Fix illegal checksum combinations */
3959 if ((dev->features & NETIF_F_HW_CSUM) &&
3960 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3961 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3962 dev->name);
3963 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3966 if ((dev->features & NETIF_F_NO_CSUM) &&
3967 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3968 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3969 dev->name);
3970 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3974 /* Fix illegal SG+CSUM combinations. */
3975 if ((dev->features & NETIF_F_SG) &&
3976 !(dev->features & NETIF_F_ALL_CSUM)) {
3977 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3978 dev->name);
3979 dev->features &= ~NETIF_F_SG;
3982 /* TSO requires that SG is present as well. */
3983 if ((dev->features & NETIF_F_TSO) &&
3984 !(dev->features & NETIF_F_SG)) {
3985 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3986 dev->name);
3987 dev->features &= ~NETIF_F_TSO;
3989 if (dev->features & NETIF_F_UFO) {
3990 if (!(dev->features & NETIF_F_HW_CSUM)) {
3991 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3992 "NETIF_F_HW_CSUM feature.\n",
3993 dev->name);
3994 dev->features &= ~NETIF_F_UFO;
3996 if (!(dev->features & NETIF_F_SG)) {
3997 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3998 "NETIF_F_SG feature.\n",
3999 dev->name);
4000 dev->features &= ~NETIF_F_UFO;
4004 /* Enable software GSO if SG is supported. */
4005 if (dev->features & NETIF_F_SG)
4006 dev->features |= NETIF_F_GSO;
4008 netdev_initialize_kobject(dev);
4009 ret = netdev_register_kobject(dev);
4010 if (ret)
4011 goto err_uninit;
4012 dev->reg_state = NETREG_REGISTERED;
4015 * Default initial state at registration is that the
4016 * device is present.
4019 set_bit(__LINK_STATE_PRESENT, &dev->state);
4021 dev_init_scheduler(dev);
4022 dev_hold(dev);
4023 list_netdevice(dev);
4025 /* Notify protocols that a new device appeared. */
4026 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4027 ret = notifier_to_errno(ret);
4028 if (ret) {
4029 rollback_registered(dev);
4030 dev->reg_state = NETREG_UNREGISTERED;
4033 out:
4034 return ret;
4036 err_uninit:
4037 if (dev->uninit)
4038 dev->uninit(dev);
4039 goto out;
4043 * register_netdev - register a network device
4044 * @dev: device to register
4046 * Take a completed network device structure and add it to the kernel
4047 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4048 * chain. 0 is returned on success. A negative errno code is returned
4049 * on a failure to set up the device, or if the name is a duplicate.
4051 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4052 * and expands the device name if you passed a format string to
4053 * alloc_netdev.
4055 int register_netdev(struct net_device *dev)
4057 int err;
4059 rtnl_lock();
4062 * If the name is a format string the caller wants us to do a
4063 * name allocation.
4065 if (strchr(dev->name, '%')) {
4066 err = dev_alloc_name(dev, dev->name);
4067 if (err < 0)
4068 goto out;
4071 err = register_netdevice(dev);
4072 out:
4073 rtnl_unlock();
4074 return err;
4076 EXPORT_SYMBOL(register_netdev);
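/*
 * Illustrative sketch, not part of this file: a typical driver
 * lifecycle around these calls ("example_*" names are hypothetical;
 * alloc_netdev() is the single-queue wrapper around alloc_netdev_mq()
 * below):
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "ex%d",
 *			   example_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	-- expands "ex%d", takes the RTNL
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */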
4079 * netdev_wait_allrefs - wait until all references are gone.
4081 * This is called when unregistering network devices.
4083 * Any protocol or device that holds a reference should register
4084 * for netdevice notification, and cleanup and put back the
4085 * reference if they receive an UNREGISTER event.
4086 * We can get stuck here if buggy protocols don't correctly
4087 * call dev_put.
4089 static void netdev_wait_allrefs(struct net_device *dev)
4091 unsigned long rebroadcast_time, warning_time;
4093 rebroadcast_time = warning_time = jiffies;
4094 while (atomic_read(&dev->refcnt) != 0) {
4095 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4096 rtnl_lock();
4098 /* Rebroadcast unregister notification */
4099 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4101 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4102 &dev->state)) {
4103 /* We must not have linkwatch events
4104 * pending on unregister. If this
4105 * happens, we simply run the queue
4106 * unscheduled, resulting in a noop
4107 * for this device.
4109 linkwatch_run_queue();
4112 __rtnl_unlock();
4114 rebroadcast_time = jiffies;
4117 msleep(250);
4119 if (time_after(jiffies, warning_time + 10 * HZ)) {
4120 printk(KERN_EMERG "unregister_netdevice: "
4121 "waiting for %s to become free. Usage "
4122 "count = %d\n",
4123 dev->name, atomic_read(&dev->refcnt));
4124 warning_time = jiffies;
4129 /* The sequence is:
4131 * rtnl_lock();
4132 * ...
4133 * register_netdevice(x1);
4134 * register_netdevice(x2);
4135 * ...
4136 * unregister_netdevice(y1);
4137 * unregister_netdevice(y2);
4138 * ...
4139 * rtnl_unlock();
4140 * free_netdev(y1);
4141 * free_netdev(y2);
4143 * We are invoked by rtnl_unlock() after it drops the semaphore.
4144 * This allows us to deal with problems:
4145 * 1) We can delete sysfs objects which invoke hotplug
4146 * without deadlocking with linkwatch via keventd.
4147 * 2) Since we run with the RTNL semaphore not held, we can sleep
4148 * safely in order to wait for the netdev refcnt to drop to zero.
4150 static DEFINE_MUTEX(net_todo_run_mutex);
4151 void netdev_run_todo(void)
4153 struct list_head list;
4155 /* Need to guard against multiple CPUs getting out of order. */
4156 mutex_lock(&net_todo_run_mutex);
4158 /* Not safe to do outside the semaphore. We must not return
4159 * until all unregister events invoked by the local processor
4160 * have been completed (either by this todo run, or one on
4161 * another cpu).
4163 if (list_empty(&net_todo_list))
4164 goto out;
4166 /* Snapshot list, allow later requests */
4167 spin_lock(&net_todo_list_lock);
4168 list_replace_init(&net_todo_list, &list);
4169 spin_unlock(&net_todo_list_lock);
4171 while (!list_empty(&list)) {
4172 struct net_device *dev
4173 = list_entry(list.next, struct net_device, todo_list);
4174 list_del(&dev->todo_list);
4176 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4177 printk(KERN_ERR "network todo '%s' but state %d\n",
4178 dev->name, dev->reg_state);
4179 dump_stack();
4180 continue;
4183 dev->reg_state = NETREG_UNREGISTERED;
4185 on_each_cpu(flush_backlog, dev, 1);
4187 netdev_wait_allrefs(dev);
4189 /* paranoia */
4190 BUG_ON(atomic_read(&dev->refcnt));
4191 WARN_ON(dev->ip_ptr);
4192 WARN_ON(dev->ip6_ptr);
4193 WARN_ON(dev->dn_ptr);
4195 if (dev->destructor)
4196 dev->destructor(dev);
4198 /* Free network device */
4199 kobject_put(&dev->dev.kobj);
4202 out:
4203 mutex_unlock(&net_todo_run_mutex);
4206 static struct net_device_stats *internal_stats(struct net_device *dev)
4208 return &dev->stats;
4211 static void netdev_init_one_queue(struct net_device *dev,
4212 struct netdev_queue *queue,
4213 void *_unused)
4215 queue->dev = dev;
4218 static void netdev_init_queues(struct net_device *dev)
4220 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4221 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4222 spin_lock_init(&dev->tx_global_lock);
4226 * alloc_netdev_mq - allocate network device
4227 * @sizeof_priv: size of private data to allocate space for
4228 * @name: device name format string
4229 * @setup: callback to initialize device
4230 * @queue_count: the number of subqueues to allocate
4232 * Allocates a struct net_device with private data area for driver use
4233 * and performs basic initialization. Also allocates subqueue structs
4234 * for each queue on the device at the end of the netdevice.
4236 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4237 void (*setup)(struct net_device *), unsigned int queue_count)
4239 struct netdev_queue *tx;
4240 struct net_device *dev;
4241 size_t alloc_size;
4242 void *p;
4244 BUG_ON(strlen(name) >= sizeof(dev->name));
4246 alloc_size = sizeof(struct net_device);
4247 if (sizeof_priv) {
4248 /* ensure 32-byte alignment of private area */
4249 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4250 alloc_size += sizeof_priv;
4252 /* ensure 32-byte alignment of whole construct */
4253 alloc_size += NETDEV_ALIGN_CONST;
4255 p = kzalloc(alloc_size, GFP_KERNEL);
4256 if (!p) {
4257 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4258 return NULL;
4261 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4262 if (!tx) {
4263 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4264 "tx qdiscs.\n");
4265 kfree(p);
4266 return NULL;
4269 dev = (struct net_device *)
4270 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4271 dev->padded = (char *)dev - (char *)p;
4272 dev_net_set(dev, &init_net);
4274 dev->_tx = tx;
4275 dev->num_tx_queues = queue_count;
4276 dev->real_num_tx_queues = queue_count;
4278 if (sizeof_priv) {
4279 dev->priv = ((char *)dev +
4280 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4281 & ~NETDEV_ALIGN_CONST));
4284 dev->gso_max_size = GSO_MAX_SIZE;
4286 netdev_init_queues(dev);
4288 dev->get_stats = internal_stats;
4289 netpoll_netdev_init(dev);
4290 setup(dev);
4291 strcpy(dev->name, name);
4292 return dev;
4294 EXPORT_SYMBOL(alloc_netdev_mq);
4297 * free_netdev - free network device
4298 * @dev: device
4300 * This function does the last stage of destroying an allocated device
4301 * interface. The reference to the device object is released.
4302 * If this is the last reference then it will be freed.
4304 void free_netdev(struct net_device *dev)
4306 release_net(dev_net(dev));
4308 kfree(dev->_tx);
4310 /* Compatibility with error handling in drivers */
4311 if (dev->reg_state == NETREG_UNINITIALIZED) {
4312 kfree((char *)dev - dev->padded);
4313 return;
4316 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4317 dev->reg_state = NETREG_RELEASED;
4319 /* will free via device release */
4320 put_device(&dev->dev);
4323 /* Synchronize with packet receive processing. */
4324 void synchronize_net(void)
4326 might_sleep();
4327 synchronize_rcu();
4331 * unregister_netdevice - remove device from the kernel
4332 * @dev: device
4334 * This function shuts down a device interface and removes it
4335 * from the kernel tables.
4337 * Callers must hold the rtnl semaphore. You may want
4338 * unregister_netdev() instead of this.
4341 void unregister_netdevice(struct net_device *dev)
4343 ASSERT_RTNL();
4345 rollback_registered(dev);
4346 /* Finish processing unregister after unlock */
4347 net_set_todo(dev);
4351 * unregister_netdev - remove device from the kernel
4352 * @dev: device
4354 * This function shuts down a device interface and removes it
4355 * from the kernel tables.
4357 * This is just a wrapper for unregister_netdevice that takes
4358 * the rtnl semaphore. In general you want to use this and not
4359 * unregister_netdevice.
4361 void unregister_netdev(struct net_device *dev)
4363 rtnl_lock();
4364 unregister_netdevice(dev);
4365 rtnl_unlock();
4368 EXPORT_SYMBOL(unregister_netdev);
4371 * dev_change_net_namespace - move device to a different network namespace
4372 * @dev: device
4373 * @net: network namespace
4374 * @pat: If not NULL, name pattern to try if the current device name
4375 * is already taken in the destination network namespace.
4377 * This function shuts down a device interface and moves it
4378 * to a new network namespace. On success 0 is returned, on
4379 * a failure a negative errno code is returned.
4381 * Callers must hold the rtnl semaphore.
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean up all their state.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_addr_discard(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	netdev_unregister_kobject(dev);
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}

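/*
 * Usage sketch (editor's illustration): moving a device into another
 * namespace under the RTNL, with "eth%d" as the fallback pattern should
 * the current name collide; "net" is assumed to be a held namespace
 * reference.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *	rtnl_unlock();
 */
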
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes. The net_dma client tries to have one DMA channel per CPU.
 */
static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;

	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}

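/*
 * Distribution sketch (editor's note): each channel is assigned
 * num_online_cpus() / channels CPUs, and the first
 * num_online_cpus() % channels of them receive one extra; e.g. 8
 * online CPUs over 3 channels yields a 3/3/2 split.
 */
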
/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
		break;
	case DMA_RESOURCE_REMOVED:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[pos] = NULL;
			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}

/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
	/* channels[] holds one struct dma_chan pointer per possible CPU */
	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
				   GFP_KERNEL);
	if (unlikely(!net_dma.channels)) {
		printk(KERN_NOTICE
		       "netdev_dma: no memory for net_dma.channels\n");
		return -ENOMEM;
	}
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
	return 0;
}
#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */

/**
 *	netdev_compute_features - compute the conjunction of two feature sets
 *	@all: first feature set
 *	@one: second feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Returns
 *	the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
	/* if device needs checksumming, downgrade to hw checksumming */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
		all ^= NETIF_F_HW_CSUM
		       | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (one & NETIF_F_GSO)
		one |= NETIF_F_GSO_SOFTWARE;
	one |= NETIF_F_GSO;

	/*
	 * If even one device supports a GSO protocol with software fallback,
	 * enable it for all.
	 */
	all |= one & NETIF_F_GSO_SOFTWARE;

	/* If even one device supports robust GSO, enable it for all. */
	if (one & NETIF_F_GSO_ROBUST)
		all |= NETIF_F_GSO_ROBUST;

	all &= one | NETIF_F_LLTX;

	if (!(all & NETIF_F_ALL_CSUM))
		all &= ~NETIF_F_SG;
	if (!(all & NETIF_F_SG))
		all &= ~NETIF_F_GSO_MASK;

	return all;
}
EXPORT_SYMBOL(netdev_compute_features);

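/*
 * Usage sketch (editor's illustration): a master device such as a bond
 * folds each slave's features into its own; "master", "slaves" and
 * "num_slaves" are hypothetical.
 *
 *	unsigned long features = master->features;
 *	int i;
 *
 *	for (i = 0; i < num_slaves; i++)
 *		features = netdev_compute_features(features,
 *						   slaves[i]->features);
 *	master->features = features;
 */
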
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

char *netdev_drivername(struct net_device *dev, char *buffer, int len)
{
	struct device_driver *driver;
	struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

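/*
 * Usage sketch (editor's illustration): resolving the driver name for
 * a diagnostic message, watchdog-style.
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "%s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */
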
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *next;

	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 *	This is called single-threaded during boot, so there is no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
	}

	netdev_dma_register();

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);