netdev: docbook comment update (revised)
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call per packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/mutex.h>
84 #include <linux/string.h>
85 #include <linux/mm.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/ethtool.h>
94 #include <linux/notifier.h>
95 #include <linux/skbuff.h>
96 #include <net/net_namespace.h>
97 #include <net/sock.h>
98 #include <linux/rtnetlink.h>
99 #include <linux/proc_fs.h>
100 #include <linux/seq_file.h>
101 #include <linux/stat.h>
102 #include <linux/if_bridge.h>
103 #include <linux/if_macvlan.h>
104 #include <net/dst.h>
105 #include <net/pkt_sched.h>
106 #include <net/checksum.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/kallsyms.h>
112 #include <linux/netpoll.h>
113 #include <linux/rcupdate.h>
114 #include <linux/delay.h>
115 #include <net/wext.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
130 #include "net-sysfs.h"
133 * The list of packet types we will receive (as opposed to discard)
134 * and the routines to invoke.
136 * Why 16? Because with 16 the only overlap we get on a hash of the
137 * low nibble of the protocol value is RARP/SNAP/X.25.
139 * NOTE: That is no longer true with the addition of VLAN tags. Not
140 * sure which should go first, but I bet it won't make much
141 * difference if we are running VLANs. The good news is that
142 * this protocol won't be in the list unless compiled in, so
143 * the average user (w/out VLANs) will not be adversely affected.
144 * --BLG
146 * 0800 IP
147 * 8100 802.1Q VLAN
148 * 0001 802.3
149 * 0002 AX.25
150 * 0004 802.2
151 * 8035 RARP
152 * 0005 SNAP
153 * 0805 X.25
154 * 0806 ARP
155 * 8137 IPX
156 * 0009 Localtalk
157 * 86DD IPv6
160 #define PTYPE_HASH_SIZE (16)
161 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
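/*
 * Worked example of the hashing described above: the bucket is the low
 * nibble of the host-order ethertype, so 0x8035 (RARP), 0x0005 (SNAP)
 * and 0x0805 (X.25) all land in bucket 5, while 0x0800 (IP) and
 * 0x8100 (802.1Q VLAN) share bucket 0.
 */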
163 static DEFINE_SPINLOCK(ptype_lock);
164 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
165 static struct list_head ptype_all __read_mostly; /* Taps */
167 #ifdef CONFIG_NET_DMA
168 struct net_dma {
169 struct dma_client client;
170 spinlock_t lock;
171 cpumask_t channel_mask;
172 struct dma_chan **channels;
175 static enum dma_state_client
176 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
177 enum dma_state state);
179 static struct net_dma net_dma = {
180 .client = {
181 .event_callback = netdev_dma_event,
184 #endif
187 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
188 * semaphore.
190 * Pure readers hold dev_base_lock for reading.
192 * Writers must hold the rtnl semaphore while they loop through the
193 * dev_base_head list, and hold dev_base_lock for writing when they do the
194 * actual updates. This allows pure readers to access the list even
195 * while a writer is preparing to update it.
197 * To put it another way, dev_base_lock is held for writing only to
198 * protect against pure readers; the rtnl semaphore provides the
199 * protection against other writers.
201 * See, for example usages, register_netdevice() and
202 * unregister_netdevice(), which must be called with the rtnl
203 * semaphore held.
205 DEFINE_RWLOCK(dev_base_lock);
207 EXPORT_SYMBOL(dev_base_lock);
209 #define NETDEV_HASHBITS 8
210 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
212 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
214 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
215 return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
218 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
220 return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
223 /* Device list insertion */
224 static int list_netdevice(struct net_device *dev)
226 struct net *net = dev_net(dev);
228 ASSERT_RTNL();
230 write_lock_bh(&dev_base_lock);
231 list_add_tail(&dev->dev_list, &net->dev_base_head);
232 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
233 hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
234 write_unlock_bh(&dev_base_lock);
235 return 0;
238 /* Device list removal */
239 static void unlist_netdevice(struct net_device *dev)
241 ASSERT_RTNL();
243 /* Unlink dev from the device chain */
244 write_lock_bh(&dev_base_lock);
245 list_del(&dev->dev_list);
246 hlist_del(&dev->name_hlist);
247 hlist_del(&dev->index_hlist);
248 write_unlock_bh(&dev_base_lock);
252 * Our notifier list
255 static RAW_NOTIFIER_HEAD(netdev_chain);
258 * Device drivers call our routines to queue packets here. We empty the
259 * queue in the local softnet handler.
262 DEFINE_PER_CPU(struct softnet_data, softnet_data);
264 #ifdef CONFIG_LOCKDEP
266 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
267 * according to dev->type
269 static const unsigned short netdev_lock_type[] =
270 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
271 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
272 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
273 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
274 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
275 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
276 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
277 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
278 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
279 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
280 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
281 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
282 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
283 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
284 ARPHRD_NONE};
286 static const char *netdev_lock_name[] =
287 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
288 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
289 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
290 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
291 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
292 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
293 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
294 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
295 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
296 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
297 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
298 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
299 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
300 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
301 "_xmit_NONE"};
303 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
304 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
306 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
308 int i;
310 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
311 if (netdev_lock_type[i] == dev_type)
312 return i;
313 /* the last key is used by default */
314 return ARRAY_SIZE(netdev_lock_type) - 1;
317 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
318 unsigned short dev_type)
320 int i;
322 i = netdev_lock_pos(dev_type);
323 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
324 netdev_lock_name[i]);
327 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
329 int i;
331 i = netdev_lock_pos(dev->type);
332 lockdep_set_class_and_name(&dev->addr_list_lock,
333 &netdev_addr_lock_key[i],
334 netdev_lock_name[i]);
336 #else
337 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
338 unsigned short dev_type)
341 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
344 #endif
346 /*******************************************************************************
348 Protocol management and registration routines
350 *******************************************************************************/
353 * Add a protocol ID to the list. Now that the input handler is
354 * smarter we can dispense with all the messy stuff that used to be
355 * here.
357 * BEWARE!!! Protocol handlers, mangling input packets,
358 * MUST BE last in hash buckets and checking protocol handlers
359 * MUST start from promiscuous ptype_all chain in net_bh.
360 * It is true now, do not change it.
361 * Explanation follows: if protocol handler, mangling packet, will
362 * be the first on list, it is not able to sense, that packet
363 * is cloned and should be copied-on-write, so that it will
364 * change it and subsequent readers will get broken packet.
365 * --ANK (980803)
369 * dev_add_pack - add packet handler
370 * @pt: packet type declaration
372 * Add a protocol handler to the networking stack. The passed &packet_type
373 * is linked into kernel lists and may not be freed until it has been
374 * removed from the kernel lists.
376 * This call does not sleep, therefore it cannot
377 * guarantee that all CPUs that are in the middle of receiving packets
378 * will see the new packet type (until the next received packet).
381 void dev_add_pack(struct packet_type *pt)
383 int hash;
385 spin_lock_bh(&ptype_lock);
386 if (pt->type == htons(ETH_P_ALL))
387 list_add_rcu(&pt->list, &ptype_all);
388 else {
389 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
390 list_add_rcu(&pt->list, &ptype_base[hash]);
392 spin_unlock_bh(&ptype_lock);
396 * __dev_remove_pack - remove packet handler
397 * @pt: packet type declaration
399 * Remove a protocol handler that was previously added to the kernel
400 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
401 * from the kernel lists and can be freed or reused once this function
402 * returns.
404 * The packet type might still be in use by receivers
405 * and must not be freed until after all the CPUs have gone
406 * through a quiescent state.
408 void __dev_remove_pack(struct packet_type *pt)
410 struct list_head *head;
411 struct packet_type *pt1;
413 spin_lock_bh(&ptype_lock);
415 if (pt->type == htons(ETH_P_ALL))
416 head = &ptype_all;
417 else
418 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
420 list_for_each_entry(pt1, head, list) {
421 if (pt == pt1) {
422 list_del_rcu(&pt->list);
423 goto out;
427 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
428 out:
429 spin_unlock_bh(&ptype_lock);
432 * dev_remove_pack - remove packet handler
433 * @pt: packet type declaration
435 * Remove a protocol handler that was previously added to the kernel
436 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
437 * from the kernel lists and can be freed or reused once this function
438 * returns.
440 * This call sleeps to guarantee that no CPU is looking at the packet
441 * type after return.
443 void dev_remove_pack(struct packet_type *pt)
445 __dev_remove_pack(pt);
447 synchronize_net();
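/*
 * Illustrative sketch (not part of the original file, assuming the usual
 * <linux/netdevice.h> and <linux/skbuff.h> declarations): how a protocol
 * module might typically pair dev_add_pack() with dev_remove_pack().
 * The ethertype value and all "example_*" names here are made up.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... process the frame ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type = {
	.type = __constant_htons(0x88B5),	/* hypothetical ethertype */
	.func = example_rcv,
};

/* dev_add_pack(&example_packet_type) at module init time,
 * dev_remove_pack(&example_packet_type) at module exit time. */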
450 /******************************************************************************
452 Device Boot-time Settings Routines
454 *******************************************************************************/
456 /* Boot time configuration table */
457 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
460 * netdev_boot_setup_add - add new setup entry
461 * @name: name of the device
462 * @map: configured settings for the device
464 * Adds a new setup entry to the dev_boot_setup list. The function
465 * returns 0 on error and 1 on success. This is a generic routine for
466 * all netdevices.
468 static int netdev_boot_setup_add(char *name, struct ifmap *map)
470 struct netdev_boot_setup *s;
471 int i;
473 s = dev_boot_setup;
474 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
475 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
476 memset(s[i].name, 0, sizeof(s[i].name));
477 strlcpy(s[i].name, name, IFNAMSIZ);
478 memcpy(&s[i].map, map, sizeof(s[i].map));
479 break;
483 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
487 * netdev_boot_setup_check - check boot time settings
488 * @dev: the netdevice
490 * Check boot time settings for the device.
491 * Any settings found are applied to the device for use
492 * later during device probing.
493 * Returns 0 if no settings were found, 1 if they were.
495 int netdev_boot_setup_check(struct net_device *dev)
497 struct netdev_boot_setup *s = dev_boot_setup;
498 int i;
500 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
501 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
502 !strcmp(dev->name, s[i].name)) {
503 dev->irq = s[i].map.irq;
504 dev->base_addr = s[i].map.base_addr;
505 dev->mem_start = s[i].map.mem_start;
506 dev->mem_end = s[i].map.mem_end;
507 return 1;
510 return 0;
515 * netdev_boot_base - get address from boot time settings
516 * @prefix: prefix for network device
517 * @unit: id for network device
519 * Check boot time settings for the base address of the device.
520 * Any settings found are applied to the device for use
521 * later during device probing.
522 * Returns 0 if no settings were found.
524 unsigned long netdev_boot_base(const char *prefix, int unit)
526 const struct netdev_boot_setup *s = dev_boot_setup;
527 char name[IFNAMSIZ];
528 int i;
530 sprintf(name, "%s%d", prefix, unit);
533 * If device already registered then return base of 1
534 * to indicate not to probe for this interface
536 if (__dev_get_by_name(&init_net, name))
537 return 1;
539 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
540 if (!strcmp(name, s[i].name))
541 return s[i].map.base_addr;
542 return 0;
546 * Saves the settings configured at boot time for any netdevice.
548 int __init netdev_boot_setup(char *str)
550 int ints[5];
551 struct ifmap map;
553 str = get_options(str, ARRAY_SIZE(ints), ints);
554 if (!str || !*str)
555 return 0;
557 /* Save settings */
558 memset(&map, 0, sizeof(map));
559 if (ints[0] > 0)
560 map.irq = ints[1];
561 if (ints[0] > 1)
562 map.base_addr = ints[2];
563 if (ints[0] > 2)
564 map.mem_start = ints[3];
565 if (ints[0] > 3)
566 map.mem_end = ints[4];
568 /* Add new entry to the list */
569 return netdev_boot_setup_add(str, &map);
572 __setup("netdev=", netdev_boot_setup);
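/*
 * Example based on the parsing above (irq, base_addr, mem_start, mem_end,
 * then the name): a boot command line entry of
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * stores irq=5 and base_addr=0x300 for "eth0", to be picked up later by
 * netdev_boot_setup_check() when the driver probes the device.
 */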
574 /*******************************************************************************
576 Device Interface Subroutines
578 *******************************************************************************/
581 * __dev_get_by_name - find a device by its name
582 * @net: the applicable net namespace
583 * @name: name to find
585 * Find an interface by name. Must be called under RTNL semaphore
586 * or @dev_base_lock. If the name is found a pointer to the device
587 * is returned. If the name is not found then %NULL is returned. The
588 * reference counters are not incremented so the caller must be
589 * careful with locks.
592 struct net_device *__dev_get_by_name(struct net *net, const char *name)
594 struct hlist_node *p;
596 hlist_for_each(p, dev_name_hash(net, name)) {
597 struct net_device *dev
598 = hlist_entry(p, struct net_device, name_hlist);
599 if (!strncmp(dev->name, name, IFNAMSIZ))
600 return dev;
602 return NULL;
606 * dev_get_by_name - find a device by its name
607 * @net: the applicable net namespace
608 * @name: name to find
610 * Find an interface by name. This can be called from any
611 * context and does its own locking. The returned handle has
612 * the usage count incremented and the caller must use dev_put() to
613 * release it when it is no longer needed. %NULL is returned if no
614 * matching device is found.
617 struct net_device *dev_get_by_name(struct net *net, const char *name)
619 struct net_device *dev;
621 read_lock(&dev_base_lock);
622 dev = __dev_get_by_name(net, name);
623 if (dev)
624 dev_hold(dev);
625 read_unlock(&dev_base_lock);
626 return dev;
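/*
 * Illustrative sketch (not part of the original file): the typical
 * refcounted lookup with dev_get_by_name().  The device name and the
 * function name are hypothetical.
 */
static int example_use_device(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return -ENODEV;
	/* ... use dev while the reference is held ... */
	dev_put(dev);
	return 0;
}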
630 * __dev_get_by_index - find a device by its ifindex
631 * @net: the applicable net namespace
632 * @ifindex: index of device
634 * Search for an interface by index. Returns a pointer to the device,
635 * or %NULL if it is not found. The device has not
636 * had its reference counter increased so the caller must be careful
637 * about locking. The caller must hold either the RTNL semaphore
638 * or @dev_base_lock.
641 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
643 struct hlist_node *p;
645 hlist_for_each(p, dev_index_hash(net, ifindex)) {
646 struct net_device *dev
647 = hlist_entry(p, struct net_device, index_hlist);
648 if (dev->ifindex == ifindex)
649 return dev;
651 return NULL;
656 * dev_get_by_index - find a device by its ifindex
657 * @net: the applicable net namespace
658 * @ifindex: index of device
660 * Search for an interface by index. Returns a pointer to the device,
661 * or %NULL if it is not found. The device returned has
662 * had a reference added and the pointer is safe until the user calls
663 * dev_put to indicate they have finished with it.
666 struct net_device *dev_get_by_index(struct net *net, int ifindex)
668 struct net_device *dev;
670 read_lock(&dev_base_lock);
671 dev = __dev_get_by_index(net, ifindex);
672 if (dev)
673 dev_hold(dev);
674 read_unlock(&dev_base_lock);
675 return dev;
679 * dev_getbyhwaddr - find a device by its hardware address
680 * @net: the applicable net namespace
681 * @type: media type of device
682 * @ha: hardware address
684 * Search for an interface by MAC address. Returns a pointer to the
685 * device, or %NULL if it is not found. The caller must hold the
686 * rtnl semaphore. The returned device has not had its ref count increased
687 * and the caller must therefore be careful about locking.
689 * BUGS:
690 * If the API was consistent this would be __dev_get_by_hwaddr
693 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
695 struct net_device *dev;
697 ASSERT_RTNL();
699 for_each_netdev(net, dev)
700 if (dev->type == type &&
701 !memcmp(dev->dev_addr, ha, dev->addr_len))
702 return dev;
704 return NULL;
707 EXPORT_SYMBOL(dev_getbyhwaddr);
709 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
711 struct net_device *dev;
713 ASSERT_RTNL();
714 for_each_netdev(net, dev)
715 if (dev->type == type)
716 return dev;
718 return NULL;
721 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
723 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
725 struct net_device *dev;
727 rtnl_lock();
728 dev = __dev_getfirstbyhwtype(net, type);
729 if (dev)
730 dev_hold(dev);
731 rtnl_unlock();
732 return dev;
735 EXPORT_SYMBOL(dev_getfirstbyhwtype);
738 * dev_get_by_flags - find any device with given flags
739 * @net: the applicable net namespace
740 * @if_flags: IFF_* values
741 * @mask: bitmask of bits in if_flags to check
743 * Search for any interface with the given flags. Returns a pointer to
744 * the first matching device, or %NULL if none is found. The device returned has
745 * had a reference added and the pointer is safe until the user calls
746 * dev_put to indicate they have finished with it.
749 struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
751 struct net_device *dev, *ret;
753 ret = NULL;
754 read_lock(&dev_base_lock);
755 for_each_netdev(net, dev) {
756 if (((dev->flags ^ if_flags) & mask) == 0) {
757 dev_hold(dev);
758 ret = dev;
759 break;
762 read_unlock(&dev_base_lock);
763 return ret;
767 * dev_valid_name - check if name is okay for network device
768 * @name: name string
770 * Network device names need to be valid file names
771 * to allow sysfs to work. We also disallow any kind of
772 * whitespace.
774 int dev_valid_name(const char *name)
776 if (*name == '\0')
777 return 0;
778 if (strlen(name) >= IFNAMSIZ)
779 return 0;
780 if (!strcmp(name, ".") || !strcmp(name, ".."))
781 return 0;
783 while (*name) {
784 if (*name == '/' || isspace(*name))
785 return 0;
786 name++;
788 return 1;
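/*
 * Examples for the checks above: "eth0" and "wlan%d" pass, while "",
 * ".", "..", "a/b" and "my if" (embedded whitespace) are all rejected.
 */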
792 * __dev_alloc_name - allocate a name for a device
793 * @net: network namespace to allocate the device name in
794 * @name: name format string
795 * @buf: scratch buffer and result name string
797 * Passed a format string - eg "lt%d" - it will try to find a suitable
798 * id. It scans the list of devices to build up a free map, then chooses
799 * the first empty slot. The caller must hold the dev_base or rtnl lock
800 * while allocating the name and adding the device in order to avoid
801 * duplicates.
802 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
803 * Returns the number of the unit assigned or a negative errno code.
806 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
808 int i = 0;
809 const char *p;
810 const int max_netdevices = 8*PAGE_SIZE;
811 unsigned long *inuse;
812 struct net_device *d;
814 p = strnchr(name, IFNAMSIZ-1, '%');
815 if (p) {
817 * Verify the string, as it may have come from
818 * the user. There must be exactly one "%d" and no other "%"
819 * characters.
821 if (p[1] != 'd' || strchr(p + 2, '%'))
822 return -EINVAL;
824 /* Use one page as a bit array of possible slots */
825 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
826 if (!inuse)
827 return -ENOMEM;
829 for_each_netdev(net, d) {
830 if (!sscanf(d->name, name, &i))
831 continue;
832 if (i < 0 || i >= max_netdevices)
833 continue;
835 /* avoid cases where sscanf is not exact inverse of printf */
836 snprintf(buf, IFNAMSIZ, name, i);
837 if (!strncmp(buf, d->name, IFNAMSIZ))
838 set_bit(i, inuse);
841 i = find_first_zero_bit(inuse, max_netdevices);
842 free_page((unsigned long) inuse);
845 snprintf(buf, IFNAMSIZ, name, i);
846 if (!__dev_get_by_name(net, buf))
847 return i;
849 /* It is possible to run out of possible slots
850 * when the name is long and there isn't enough space left
851 * for the digits, or if all bits are used.
853 return -ENFILE;
857 * dev_alloc_name - allocate a name for a device
858 * @dev: device
859 * @name: name format string
861 * Passed a format string - eg "lt%d" - it will try to find a suitable
862 * id. It scans the list of devices to build up a free map, then chooses
863 * the first empty slot. The caller must hold the dev_base or rtnl lock
864 * while allocating the name and adding the device in order to avoid
865 * duplicates.
866 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
867 * Returns the number of the unit assigned or a negative errno code.
870 int dev_alloc_name(struct net_device *dev, const char *name)
872 char buf[IFNAMSIZ];
873 struct net *net;
874 int ret;
876 BUG_ON(!dev_net(dev));
877 net = dev_net(dev);
878 ret = __dev_alloc_name(net, name, buf);
879 if (ret >= 0)
880 strlcpy(dev->name, buf, IFNAMSIZ);
881 return ret;
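/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * an automatically numbered name would typically do something like
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *
 * which fills dev->name with e.g. "dummy0" and returns the unit number.
 */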
886 * dev_change_name - change name of a device
887 * @dev: device
888 * @newname: name (or format string) must be at least IFNAMSIZ
890 * Change the name of a device. A format string such as "eth%d"
891 * may be passed for wildcarding.
893 int dev_change_name(struct net_device *dev, const char *newname)
895 char oldname[IFNAMSIZ];
896 int err = 0;
897 int ret;
898 struct net *net;
900 ASSERT_RTNL();
901 BUG_ON(!dev_net(dev));
903 net = dev_net(dev);
904 if (dev->flags & IFF_UP)
905 return -EBUSY;
907 if (!dev_valid_name(newname))
908 return -EINVAL;
910 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
911 return 0;
913 memcpy(oldname, dev->name, IFNAMSIZ);
915 if (strchr(newname, '%')) {
916 err = dev_alloc_name(dev, newname);
917 if (err < 0)
918 return err;
920 else if (__dev_get_by_name(net, newname))
921 return -EEXIST;
922 else
923 strlcpy(dev->name, newname, IFNAMSIZ);
925 rollback:
926 err = device_rename(&dev->dev, dev->name);
927 if (err) {
928 memcpy(dev->name, oldname, IFNAMSIZ);
929 return err;
932 write_lock_bh(&dev_base_lock);
933 hlist_del(&dev->name_hlist);
934 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
935 write_unlock_bh(&dev_base_lock);
937 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
938 ret = notifier_to_errno(ret);
940 if (ret) {
941 if (err) {
942 printk(KERN_ERR
943 "%s: name change rollback failed: %d.\n",
944 dev->name, ret);
945 } else {
946 err = ret;
947 memcpy(dev->name, oldname, IFNAMSIZ);
948 goto rollback;
952 return err;
956 * dev_set_alias - change ifalias of a device
957 * @dev: device
958 * @alias: name up to IFALIASZ
959 * @len: limit of bytes to copy from info
961 * Set ifalias for a device.
963 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
965 ASSERT_RTNL();
967 if (len >= IFALIASZ)
968 return -EINVAL;
970 if (!len) {
971 if (dev->ifalias) {
972 kfree(dev->ifalias);
973 dev->ifalias = NULL;
975 return 0;
978 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
979 if (!dev->ifalias)
980 return -ENOMEM;
982 strlcpy(dev->ifalias, alias, len+1);
983 return len;
988 * netdev_features_change - device changes features
989 * @dev: device to cause notification
991 * Called to indicate a device has changed features.
993 void netdev_features_change(struct net_device *dev)
995 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
997 EXPORT_SYMBOL(netdev_features_change);
1000 * netdev_state_change - device changes state
1001 * @dev: device to cause notification
1003 * Called to indicate a device has changed state. This function calls
1004 * the notifier chains for netdev_chain and sends a NEWLINK message
1005 * to the routing socket.
1007 void netdev_state_change(struct net_device *dev)
1009 if (dev->flags & IFF_UP) {
1010 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1011 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1015 void netdev_bonding_change(struct net_device *dev)
1017 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
1019 EXPORT_SYMBOL(netdev_bonding_change);
1022 * dev_load - load a network module
1023 * @net: the applicable net namespace
1024 * @name: name of interface
1026 * If a network interface is not present and the process has suitable
1027 * privileges this function loads the module. If module loading is not
1028 * available in this kernel then it becomes a nop.
1031 void dev_load(struct net *net, const char *name)
1033 struct net_device *dev;
1035 read_lock(&dev_base_lock);
1036 dev = __dev_get_by_name(net, name);
1037 read_unlock(&dev_base_lock);
1039 if (!dev && capable(CAP_SYS_MODULE))
1040 request_module("%s", name);
1044 * dev_open - prepare an interface for use.
1045 * @dev: device to open
1047 * Takes a device from down to up state. The device's private open
1048 * function is invoked and then the multicast lists are loaded. Finally
1049 * the device is moved into the up state and a %NETDEV_UP message is
1050 * sent to the netdev notifier chain.
1052 * Calling this function on an active interface is a nop. On a failure
1053 * a negative errno code is returned.
1055 int dev_open(struct net_device *dev)
1057 int ret = 0;
1059 ASSERT_RTNL();
1062 * Is it already up?
1065 if (dev->flags & IFF_UP)
1066 return 0;
1069 * Is it even present?
1071 if (!netif_device_present(dev))
1072 return -ENODEV;
1075 * Call device private open method
1077 set_bit(__LINK_STATE_START, &dev->state);
1079 if (dev->validate_addr)
1080 ret = dev->validate_addr(dev);
1082 if (!ret && dev->open)
1083 ret = dev->open(dev);
1086 * If it went open OK then:
1089 if (ret)
1090 clear_bit(__LINK_STATE_START, &dev->state);
1091 else {
1093 * Set the flags.
1095 dev->flags |= IFF_UP;
1098 * Initialize multicasting status
1100 dev_set_rx_mode(dev);
1103 * Wakeup transmit queue engine
1105 dev_activate(dev);
1108 * ... and announce new interface.
1110 call_netdevice_notifiers(NETDEV_UP, dev);
1113 return ret;
1117 * dev_close - shutdown an interface.
1118 * @dev: device to shutdown
1120 * This function moves an active device into down state. A
1121 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1122 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1123 * chain.
1125 int dev_close(struct net_device *dev)
1127 ASSERT_RTNL();
1129 might_sleep();
1131 if (!(dev->flags & IFF_UP))
1132 return 0;
1135 * Tell people we are going down, so that they can
1136 * prepare for it while the device is still operating.
1138 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1140 clear_bit(__LINK_STATE_START, &dev->state);
1142 /* Synchronize to the scheduled poll. We cannot touch the poll list,
1143 * it may even be on a different cpu. So just clear netif_running().
1145 * dev->stop() will invoke napi_disable() on all of its
1146 * napi_struct instances on this device.
1148 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1150 dev_deactivate(dev);
1153 * Call the device specific close. This cannot fail.
1154 * Only if device is UP
1156 * We allow it to be called even after a DETACH hot-plug
1157 * event.
1159 if (dev->stop)
1160 dev->stop(dev);
1163 * Device is now down.
1166 dev->flags &= ~IFF_UP;
1169 * Tell people we are down
1171 call_netdevice_notifiers(NETDEV_DOWN, dev);
1173 return 0;
1178 * dev_disable_lro - disable Large Receive Offload on a device
1179 * @dev: device
1181 * Disable Large Receive Offload (LRO) on a net device. Must be
1182 * called under RTNL. This is needed if received packets may be
1183 * forwarded to another interface.
1185 void dev_disable_lro(struct net_device *dev)
1187 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1188 dev->ethtool_ops->set_flags) {
1189 u32 flags = dev->ethtool_ops->get_flags(dev);
1190 if (flags & ETH_FLAG_LRO) {
1191 flags &= ~ETH_FLAG_LRO;
1192 dev->ethtool_ops->set_flags(dev, flags);
1195 WARN_ON(dev->features & NETIF_F_LRO);
1197 EXPORT_SYMBOL(dev_disable_lro);
1200 static int dev_boot_phase = 1;
1203 * Device change register/unregister. These are not inline or static
1204 * as we export them to the world.
1208 * register_netdevice_notifier - register a network notifier block
1209 * @nb: notifier
1211 * Register a notifier to be called when network device events occur.
1212 * The notifier passed is linked into the kernel structures and must
1213 * not be reused until it has been unregistered. A negative errno code
1214 * is returned on a failure.
1216 * When registered, all registration and up events are replayed
1217 * to the new notifier to allow the device to have a race-free
1218 * view of the network device list.
1221 int register_netdevice_notifier(struct notifier_block *nb)
1223 struct net_device *dev;
1224 struct net_device *last;
1225 struct net *net;
1226 int err;
1228 rtnl_lock();
1229 err = raw_notifier_chain_register(&netdev_chain, nb);
1230 if (err)
1231 goto unlock;
1232 if (dev_boot_phase)
1233 goto unlock;
1234 for_each_net(net) {
1235 for_each_netdev(net, dev) {
1236 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1237 err = notifier_to_errno(err);
1238 if (err)
1239 goto rollback;
1241 if (!(dev->flags & IFF_UP))
1242 continue;
1244 nb->notifier_call(nb, NETDEV_UP, dev);
1248 unlock:
1249 rtnl_unlock();
1250 return err;
1252 rollback:
1253 last = dev;
1254 for_each_net(net) {
1255 for_each_netdev(net, dev) {
1256 if (dev == last)
1257 break;
1259 if (dev->flags & IFF_UP) {
1260 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1261 nb->notifier_call(nb, NETDEV_DOWN, dev);
1263 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1267 raw_notifier_chain_unregister(&netdev_chain, nb);
1268 goto unlock;
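/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * block as it might be registered with register_netdevice_notifier().
 * The "example_*" names are made up; the callback receives the
 * struct net_device pointer as its third argument.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_INFO "example: %s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		printk(KERN_INFO "example: %s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier) at init,
 * unregister_netdevice_notifier(&example_netdev_notifier) at exit. */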
1272 * unregister_netdevice_notifier - unregister a network notifier block
1273 * @nb: notifier
1275 * Unregister a notifier previously registered by
1276 * register_netdevice_notifier(). The notifier is unlinked from the
1277 * kernel structures and may then be reused. A negative errno code
1278 * is returned on a failure.
1281 int unregister_netdevice_notifier(struct notifier_block *nb)
1283 int err;
1285 rtnl_lock();
1286 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1287 rtnl_unlock();
1288 return err;
1292 * call_netdevice_notifiers - call all network notifier blocks
1293 * @val: value passed unmodified to notifier function
1294 * @dev: net_device pointer passed unmodified to notifier function
1296 * Call all network notifier blocks. Parameters and return value
1297 * are as for raw_notifier_call_chain().
1300 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1302 return raw_notifier_call_chain(&netdev_chain, val, dev);
1305 /* When > 0 there are consumers of rx skb time stamps */
1306 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1308 void net_enable_timestamp(void)
1310 atomic_inc(&netstamp_needed);
1313 void net_disable_timestamp(void)
1315 atomic_dec(&netstamp_needed);
1318 static inline void net_timestamp(struct sk_buff *skb)
1320 if (atomic_read(&netstamp_needed))
1321 __net_timestamp(skb);
1322 else
1323 skb->tstamp.tv64 = 0;
1327 * Support routine. Sends outgoing frames to any network
1328 * taps currently in use.
1331 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1333 struct packet_type *ptype;
1335 net_timestamp(skb);
1337 rcu_read_lock();
1338 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1339 /* Never send packets back to the socket
1340 * they originated from - MvS (miquels@drinkel.ow.org)
1342 if ((ptype->dev == dev || !ptype->dev) &&
1343 (ptype->af_packet_priv == NULL ||
1344 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1345 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1346 if (!skb2)
1347 break;
1349 /* skb->nh should be correctly
1350 set by sender, so that the second statement is
1351 just protection against buggy protocols.
1353 skb_reset_mac_header(skb2);
1355 if (skb_network_header(skb2) < skb2->data ||
1356 skb2->network_header > skb2->tail) {
1357 if (net_ratelimit())
1358 printk(KERN_CRIT "protocol %04x is "
1359 "buggy, dev %s\n",
1360 skb2->protocol, dev->name);
1361 skb_reset_network_header(skb2);
1364 skb2->transport_header = skb2->network_header;
1365 skb2->pkt_type = PACKET_OUTGOING;
1366 ptype->func(skb2, skb->dev, ptype, skb->dev);
1369 rcu_read_unlock();
1373 static inline void __netif_reschedule(struct Qdisc *q)
1375 struct softnet_data *sd;
1376 unsigned long flags;
1378 local_irq_save(flags);
1379 sd = &__get_cpu_var(softnet_data);
1380 q->next_sched = sd->output_queue;
1381 sd->output_queue = q;
1382 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1383 local_irq_restore(flags);
1386 void __netif_schedule(struct Qdisc *q)
1388 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1389 __netif_reschedule(q);
1391 EXPORT_SYMBOL(__netif_schedule);
1393 void dev_kfree_skb_irq(struct sk_buff *skb)
1395 if (atomic_dec_and_test(&skb->users)) {
1396 struct softnet_data *sd;
1397 unsigned long flags;
1399 local_irq_save(flags);
1400 sd = &__get_cpu_var(softnet_data);
1401 skb->next = sd->completion_queue;
1402 sd->completion_queue = skb;
1403 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1404 local_irq_restore(flags);
1407 EXPORT_SYMBOL(dev_kfree_skb_irq);
1409 void dev_kfree_skb_any(struct sk_buff *skb)
1411 if (in_irq() || irqs_disabled())
1412 dev_kfree_skb_irq(skb);
1413 else
1414 dev_kfree_skb(skb);
1416 EXPORT_SYMBOL(dev_kfree_skb_any);
1420 * netif_device_detach - mark device as removed
1421 * @dev: network device
1423 * Mark device as removed from system and therefore no longer available.
1425 void netif_device_detach(struct net_device *dev)
1427 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1428 netif_running(dev)) {
1429 netif_stop_queue(dev);
1432 EXPORT_SYMBOL(netif_device_detach);
1435 * netif_device_attach - mark device as attached
1436 * @dev: network device
1438 * Mark device as attached to the system and restart it if needed.
1440 void netif_device_attach(struct net_device *dev)
1442 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1443 netif_running(dev)) {
1444 netif_wake_queue(dev);
1445 __netdev_watchdog_up(dev);
1448 EXPORT_SYMBOL(netif_device_attach);
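/*
 * Illustrative sketch (not part of the original file): the usual
 * suspend/resume pairing of the two helpers above, as many network
 * drivers use them.  The driver callback names are hypothetical.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stop the queue, mark !present */
	/* ... put the hardware to sleep ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... wake the hardware back up ... */
	netif_device_attach(dev);	/* mark present, restart the queue */
	return 0;
}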
1450 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1452 return ((features & NETIF_F_GEN_CSUM) ||
1453 ((features & NETIF_F_IP_CSUM) &&
1454 protocol == htons(ETH_P_IP)) ||
1455 ((features & NETIF_F_IPV6_CSUM) &&
1456 protocol == htons(ETH_P_IPV6)));
1459 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1461 if (can_checksum_protocol(dev->features, skb->protocol))
1462 return true;
1464 if (skb->protocol == htons(ETH_P_8021Q)) {
1465 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1466 if (can_checksum_protocol(dev->features & dev->vlan_features,
1467 veh->h_vlan_encapsulated_proto))
1468 return true;
1471 return false;
1475 * Invalidate hardware checksum when packet is to be mangled, and
1476 * complete checksum manually on outgoing path.
1478 int skb_checksum_help(struct sk_buff *skb)
1480 __wsum csum;
1481 int ret = 0, offset;
1483 if (skb->ip_summed == CHECKSUM_COMPLETE)
1484 goto out_set_summed;
1486 if (unlikely(skb_shinfo(skb)->gso_size)) {
1487 /* Let GSO fix up the checksum. */
1488 goto out_set_summed;
1491 offset = skb->csum_start - skb_headroom(skb);
1492 BUG_ON(offset >= skb_headlen(skb));
1493 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1495 offset += skb->csum_offset;
1496 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1498 if (skb_cloned(skb) &&
1499 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1500 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1501 if (ret)
1502 goto out;
1505 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1506 out_set_summed:
1507 skb->ip_summed = CHECKSUM_NONE;
1508 out:
1509 return ret;
1513 * skb_gso_segment - Perform segmentation on skb.
1514 * @skb: buffer to segment
1515 * @features: features for the output path (see dev->features)
1517 * This function segments the given skb and returns a list of segments.
1519 * It may return NULL if the skb requires no segmentation. This is
1520 * only possible when GSO is used for verifying header integrity.
1522 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1524 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1525 struct packet_type *ptype;
1526 __be16 type = skb->protocol;
1527 int err;
1529 BUG_ON(skb_shinfo(skb)->frag_list);
1531 skb_reset_mac_header(skb);
1532 skb->mac_len = skb->network_header - skb->mac_header;
1533 __skb_pull(skb, skb->mac_len);
1535 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
1536 if (skb_header_cloned(skb) &&
1537 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1538 return ERR_PTR(err);
1541 rcu_read_lock();
1542 list_for_each_entry_rcu(ptype,
1543 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1544 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1545 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1546 err = ptype->gso_send_check(skb);
1547 segs = ERR_PTR(err);
1548 if (err || skb_gso_ok(skb, features))
1549 break;
1550 __skb_push(skb, (skb->data -
1551 skb_network_header(skb)));
1553 segs = ptype->gso_segment(skb, features);
1554 break;
1557 rcu_read_unlock();
1559 __skb_push(skb, skb->data - skb_mac_header(skb));
1561 return segs;
1564 EXPORT_SYMBOL(skb_gso_segment);
1566 /* Take action when hardware reception checksum errors are detected. */
1567 #ifdef CONFIG_BUG
1568 void netdev_rx_csum_fault(struct net_device *dev)
1570 if (net_ratelimit()) {
1571 printk(KERN_ERR "%s: hw csum failure.\n",
1572 dev ? dev->name : "<unknown>");
1573 dump_stack();
1576 EXPORT_SYMBOL(netdev_rx_csum_fault);
1577 #endif
1579 /* Actually, we should eliminate this check as soon as we know that:
1580 * 1. An IOMMU is present and can map all of the memory.
1581 * 2. No high memory really exists on this machine.
1584 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1586 #ifdef CONFIG_HIGHMEM
1587 int i;
1589 if (dev->features & NETIF_F_HIGHDMA)
1590 return 0;
1592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1593 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1594 return 1;
1596 #endif
1597 return 0;
1600 struct dev_gso_cb {
1601 void (*destructor)(struct sk_buff *skb);
1604 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1606 static void dev_gso_skb_destructor(struct sk_buff *skb)
1608 struct dev_gso_cb *cb;
1610 do {
1611 struct sk_buff *nskb = skb->next;
1613 skb->next = nskb->next;
1614 nskb->next = NULL;
1615 kfree_skb(nskb);
1616 } while (skb->next);
1618 cb = DEV_GSO_CB(skb);
1619 if (cb->destructor)
1620 cb->destructor(skb);
1624 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1625 * @skb: buffer to segment
1627 * This function segments the given skb and stores the list of segments
1628 * in skb->next.
1630 static int dev_gso_segment(struct sk_buff *skb)
1632 struct net_device *dev = skb->dev;
1633 struct sk_buff *segs;
1634 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1635 NETIF_F_SG : 0);
1637 segs = skb_gso_segment(skb, features);
1639 /* Verifying header integrity only. */
1640 if (!segs)
1641 return 0;
1643 if (IS_ERR(segs))
1644 return PTR_ERR(segs);
1646 skb->next = segs;
1647 DEV_GSO_CB(skb)->destructor = skb->destructor;
1648 skb->destructor = dev_gso_skb_destructor;
1650 return 0;
1653 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1654 struct netdev_queue *txq)
1656 if (likely(!skb->next)) {
1657 if (!list_empty(&ptype_all))
1658 dev_queue_xmit_nit(skb, dev);
1660 if (netif_needs_gso(dev, skb)) {
1661 if (unlikely(dev_gso_segment(skb)))
1662 goto out_kfree_skb;
1663 if (skb->next)
1664 goto gso;
1667 return dev->hard_start_xmit(skb, dev);
1670 gso:
1671 do {
1672 struct sk_buff *nskb = skb->next;
1673 int rc;
1675 skb->next = nskb->next;
1676 nskb->next = NULL;
1677 rc = dev->hard_start_xmit(nskb, dev);
1678 if (unlikely(rc)) {
1679 nskb->next = skb->next;
1680 skb->next = nskb;
1681 return rc;
1683 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1684 return NETDEV_TX_BUSY;
1685 } while (skb->next);
1687 skb->destructor = DEV_GSO_CB(skb)->destructor;
1689 out_kfree_skb:
1690 kfree_skb(skb);
1691 return 0;
1694 static u32 simple_tx_hashrnd;
1695 static int simple_tx_hashrnd_initialized = 0;
1697 static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1699 u32 addr1, addr2, ports;
1700 u32 hash, ihl;
1701 u8 ip_proto;
1703 if (unlikely(!simple_tx_hashrnd_initialized)) {
1704 get_random_bytes(&simple_tx_hashrnd, 4);
1705 simple_tx_hashrnd_initialized = 1;
1708 switch (skb->protocol) {
1709 case htons(ETH_P_IP):
1710 ip_proto = ip_hdr(skb)->protocol;
1711 addr1 = ip_hdr(skb)->saddr;
1712 addr2 = ip_hdr(skb)->daddr;
1713 ihl = ip_hdr(skb)->ihl;
1714 break;
1715 case htons(ETH_P_IPV6):
1716 ip_proto = ipv6_hdr(skb)->nexthdr;
1717 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1718 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
1719 ihl = (40 >> 2);
1720 break;
1721 default:
1722 return 0;
1726 switch (ip_proto) {
1727 case IPPROTO_TCP:
1728 case IPPROTO_UDP:
1729 case IPPROTO_DCCP:
1730 case IPPROTO_ESP:
1731 case IPPROTO_AH:
1732 case IPPROTO_SCTP:
1733 case IPPROTO_UDPLITE:
1734 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
1735 break;
1737 default:
1738 ports = 0;
1739 break;
1742 hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
1744 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
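/*
 * Note on the final scaling step above: multiplying the 32-bit hash by
 * real_num_tx_queues and taking the top 32 bits maps the hash uniformly
 * onto [0, real_num_tx_queues) without a modulo; e.g. with 4 queues a
 * hash of 0x80000000 selects queue 2.
 */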
1747 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1748 struct sk_buff *skb)
1750 u16 queue_index = 0;
1752 if (dev->select_queue)
1753 queue_index = dev->select_queue(dev, skb);
1754 else if (dev->real_num_tx_queues > 1)
1755 queue_index = simple_tx_hash(dev, skb);
1757 skb_set_queue_mapping(skb, queue_index);
1758 return netdev_get_tx_queue(dev, queue_index);
1762 * dev_queue_xmit - transmit a buffer
1763 * @skb: buffer to transmit
1765 * Queue a buffer for transmission to a network device. The caller must
1766 * have set the device and priority and built the buffer before calling
1767 * this function. The function can be called from an interrupt.
1769 * A negative errno code is returned on a failure. A success does not
1770 * guarantee the frame will be transmitted as it may be dropped due
1771 * to congestion or traffic shaping.
1773 * -----------------------------------------------------------------------------------
1774 * I notice this method can also return errors from the queue disciplines,
1775 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1776 * be positive.
1778 * Regardless of the return value, the skb is consumed, so it is currently
1779 * difficult to retry a send to this method. (You can bump the ref count
1780 * before sending to hold a reference for retry if you are careful.)
1782 * When calling this method, interrupts MUST be enabled. This is because
1783 * the BH enable code must have IRQs enabled so that it will not deadlock.
1784 * --BLG
1786 int dev_queue_xmit(struct sk_buff *skb)
1788 struct net_device *dev = skb->dev;
1789 struct netdev_queue *txq;
1790 struct Qdisc *q;
1791 int rc = -ENOMEM;
1793 /* GSO will handle the following emulations directly. */
1794 if (netif_needs_gso(dev, skb))
1795 goto gso;
1797 if (skb_shinfo(skb)->frag_list &&
1798 !(dev->features & NETIF_F_FRAGLIST) &&
1799 __skb_linearize(skb))
1800 goto out_kfree_skb;
1802 /* Fragmented skb is linearized if device does not support SG,
1803 * or if at least one of fragments is in highmem and device
1804 * does not support DMA from it.
1806 if (skb_shinfo(skb)->nr_frags &&
1807 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1808 __skb_linearize(skb))
1809 goto out_kfree_skb;
1811 /* If packet is not checksummed and device does not support
1812 * checksumming for this protocol, complete checksumming here.
1814 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1815 skb_set_transport_header(skb, skb->csum_start -
1816 skb_headroom(skb));
1817 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1818 goto out_kfree_skb;
1821 gso:
1822 /* Disable soft irqs for various locks below. Also
1823 * stops preemption for RCU.
1825 rcu_read_lock_bh();
1827 txq = dev_pick_tx(dev, skb);
1828 q = rcu_dereference(txq->qdisc);
1830 #ifdef CONFIG_NET_CLS_ACT
1831 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1832 #endif
1833 if (q->enqueue) {
1834 spinlock_t *root_lock = qdisc_lock(q);
1836 spin_lock(root_lock);
1838 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1839 kfree_skb(skb);
1840 rc = NET_XMIT_DROP;
1841 } else {
1842 rc = qdisc_enqueue_root(skb, q);
1843 qdisc_run(q);
1845 spin_unlock(root_lock);
1847 goto out;
1850 /* The device has no queue. Common case for software devices:
1851 loopback, all sorts of tunnels...
1853 Really, it is unlikely that netif_tx_lock protection is necessary
1854 here. (f.e. loopback and IP tunnels are clean ignoring statistics
1855 counters.)
1856 However, it is possible that they rely on the protection
1857 we provide here.
1859 Check this and take the lock. It is not prone to deadlocks.
1860 Or take the noqueue qdisc path, which is even simpler 8)
1862 if (dev->flags & IFF_UP) {
1863 int cpu = smp_processor_id(); /* ok because BHs are off */
1865 if (txq->xmit_lock_owner != cpu) {
1867 HARD_TX_LOCK(dev, txq, cpu);
1869 if (!netif_tx_queue_stopped(txq)) {
1870 rc = 0;
1871 if (!dev_hard_start_xmit(skb, dev, txq)) {
1872 HARD_TX_UNLOCK(dev, txq);
1873 goto out;
1876 HARD_TX_UNLOCK(dev, txq);
1877 if (net_ratelimit())
1878 printk(KERN_CRIT "Virtual device %s asks to "
1879 "queue packet!\n", dev->name);
1880 } else {
1881 /* Recursion is detected! It is possible,
1882 * unfortunately */
1883 if (net_ratelimit())
1884 printk(KERN_CRIT "Dead loop on virtual device "
1885 "%s, fix it urgently!\n", dev->name);
1889 rc = -ENETDOWN;
1890 rcu_read_unlock_bh();
1892 out_kfree_skb:
1893 kfree_skb(skb);
1894 return rc;
1895 out:
1896 rcu_read_unlock_bh();
1897 return rc;
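/*
 * Illustrative sketch (not part of the original file): how a caller that
 * has already built an skb (dev, priority and headers set) hands it off.
 * Per the comment above, any non-zero return - a negative errno or a
 * positive NET_XMIT_* code - means the skb has still been consumed.
 * The function name is made up.
 */
static void example_send(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	if (rc)
		/* skb is gone either way; only account for the failure */
		printk(KERN_DEBUG "example: xmit returned %d\n", rc);
}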
1901 /*=======================================================================
1902 Receiver routines
1903 =======================================================================*/
1905 int netdev_max_backlog __read_mostly = 1000;
1906 int netdev_budget __read_mostly = 300;
1907 int weight_p __read_mostly = 64; /* old backlog weight */
1909 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1913 * netif_rx - post buffer to the network code
1914 * @skb: buffer to post
1916 * This function receives a packet from a device driver and queues it for
1917 * the upper (protocol) levels to process. It always succeeds. The buffer
1918 * may be dropped during processing for congestion control or by the
1919 * protocol layers.
1921 * return values:
1922 * NET_RX_SUCCESS (no congestion)
1923 * NET_RX_DROP (packet was dropped)
1927 int netif_rx(struct sk_buff *skb)
1929 struct softnet_data *queue;
1930 unsigned long flags;
1932 /* if netpoll wants it, pretend we never saw it */
1933 if (netpoll_rx(skb))
1934 return NET_RX_DROP;
1936 if (!skb->tstamp.tv64)
1937 net_timestamp(skb);
1940 * The code is rearranged so that the path is shortest
1941 * when the CPU is congested but still operating.
1943 local_irq_save(flags);
1944 queue = &__get_cpu_var(softnet_data);
1946 __get_cpu_var(netdev_rx_stat).total++;
1947 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1948 if (queue->input_pkt_queue.qlen) {
1949 enqueue:
1950 __skb_queue_tail(&queue->input_pkt_queue, skb);
1951 local_irq_restore(flags);
1952 return NET_RX_SUCCESS;
1955 napi_schedule(&queue->backlog);
1956 goto enqueue;
1959 __get_cpu_var(netdev_rx_stat).dropped++;
1960 local_irq_restore(flags);
1962 kfree_skb(skb);
1963 return NET_RX_DROP;
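/*
 * Illustrative sketch (not part of the original file): the minimal receive
 * path of a non-NAPI driver interrupt handler feeding netif_rx().  The
 * helper name and the way the frame data is obtained are made up.
 */
static void example_rx_frame(struct net_device *dev, void *data, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (!skb)
		return;				/* drop on allocation failure */
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for the softirq backlog */
}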
1966 int netif_rx_ni(struct sk_buff *skb)
1968 int err;
1970 preempt_disable();
1971 err = netif_rx(skb);
1972 if (local_softirq_pending())
1973 do_softirq();
1974 preempt_enable();
1976 return err;
1979 EXPORT_SYMBOL(netif_rx_ni);
1981 static void net_tx_action(struct softirq_action *h)
1983 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1985 if (sd->completion_queue) {
1986 struct sk_buff *clist;
1988 local_irq_disable();
1989 clist = sd->completion_queue;
1990 sd->completion_queue = NULL;
1991 local_irq_enable();
1993 while (clist) {
1994 struct sk_buff *skb = clist;
1995 clist = clist->next;
1997 WARN_ON(atomic_read(&skb->users));
1998 __kfree_skb(skb);
2002 if (sd->output_queue) {
2003 struct Qdisc *head;
2005 local_irq_disable();
2006 head = sd->output_queue;
2007 sd->output_queue = NULL;
2008 local_irq_enable();
2010 while (head) {
2011 struct Qdisc *q = head;
2012 spinlock_t *root_lock;
2014 head = head->next_sched;
2016 root_lock = qdisc_lock(q);
2017 if (spin_trylock(root_lock)) {
2018 smp_mb__before_clear_bit();
2019 clear_bit(__QDISC_STATE_SCHED,
2020 &q->state);
2021 qdisc_run(q);
2022 spin_unlock(root_lock);
2023 } else {
2024 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2025 &q->state)) {
2026 __netif_reschedule(q);
2027 } else {
2028 smp_mb__before_clear_bit();
2029 clear_bit(__QDISC_STATE_SCHED,
2030 &q->state);
2037 static inline int deliver_skb(struct sk_buff *skb,
2038 struct packet_type *pt_prev,
2039 struct net_device *orig_dev)
2041 atomic_inc(&skb->users);
2042 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2045 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2046 /* These hooks defined here for ATM */
2047 struct net_bridge;
2048 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2049 unsigned char *addr);
2050 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
2053 * If bridge module is loaded call bridging hook.
2054 * returns NULL if packet was consumed.
2056 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2057 struct sk_buff *skb) __read_mostly;
2058 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2059 struct packet_type **pt_prev, int *ret,
2060 struct net_device *orig_dev)
2062 struct net_bridge_port *port;
2064 if (skb->pkt_type == PACKET_LOOPBACK ||
2065 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2066 return skb;
2068 if (*pt_prev) {
2069 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2070 *pt_prev = NULL;
2073 return br_handle_frame_hook(port, skb);
2075 #else
2076 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2077 #endif
2079 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2080 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2081 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2083 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2084 struct packet_type **pt_prev,
2085 int *ret,
2086 struct net_device *orig_dev)
2088 if (skb->dev->macvlan_port == NULL)
2089 return skb;
2091 if (*pt_prev) {
2092 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2093 *pt_prev = NULL;
2095 return macvlan_handle_frame_hook(skb);
2097 #else
2098 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2099 #endif
2101 #ifdef CONFIG_NET_CLS_ACT
2102 /* TODO: Maybe we should just force sch_ingress to be compiled in
2103 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2104 * instructions (a compare and 2 extra stores) right now if we don't
2105 * have it on but have CONFIG_NET_CLS_ACT.
2106 * NOTE: This doesn't stop any functionality; if you don't have
2107 * the ingress scheduler, you just can't add policies on ingress.
2110 static int ing_filter(struct sk_buff *skb)
2112 struct net_device *dev = skb->dev;
2113 u32 ttl = G_TC_RTTL(skb->tc_verd);
2114 struct netdev_queue *rxq;
2115 int result = TC_ACT_OK;
2116 struct Qdisc *q;
2118 if (MAX_RED_LOOP < ttl++) {
2119 printk(KERN_WARNING
2120 "Redir loop detected Dropping packet (%d->%d)\n",
2121 skb->iif, dev->ifindex);
2122 return TC_ACT_SHOT;
2125 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2126 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2128 rxq = &dev->rx_queue;
2130 q = rxq->qdisc;
2131 if (q != &noop_qdisc) {
2132 spin_lock(qdisc_lock(q));
2133 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2134 result = qdisc_enqueue_root(skb, q);
2135 spin_unlock(qdisc_lock(q));
2138 return result;
2141 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2142 struct packet_type **pt_prev,
2143 int *ret, struct net_device *orig_dev)
2145 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2146 goto out;
2148 if (*pt_prev) {
2149 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2150 *pt_prev = NULL;
2151 } else {
2152 /* Huh? Why does turning on AF_PACKET affect this? */
2153 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2156 switch (ing_filter(skb)) {
2157 case TC_ACT_SHOT:
2158 case TC_ACT_STOLEN:
2159 kfree_skb(skb);
2160 return NULL;
2163 out:
2164 skb->tc_verd = 0;
2165 return skb;
2167 #endif
2170 * netif_nit_deliver - deliver received packets to network taps
2171 * @skb: buffer
2173 * This function is used to deliver incoming packets to network
2174 * taps. It should be used when the normal netif_receive_skb path
2175 * is bypassed, for example because of VLAN acceleration.
2177 void netif_nit_deliver(struct sk_buff *skb)
2179 struct packet_type *ptype;
2181 if (list_empty(&ptype_all))
2182 return;
2184 skb_reset_network_header(skb);
2185 skb_reset_transport_header(skb);
2186 skb->mac_len = skb->network_header - skb->mac_header;
2188 rcu_read_lock();
2189 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2190 if (!ptype->dev || ptype->dev == skb->dev)
2191 deliver_skb(skb, ptype, skb->dev);
2193 rcu_read_unlock();
2197 * netif_receive_skb - process receive buffer from network
2198 * @skb: buffer to process
2200 * netif_receive_skb() is the main receive data processing function.
2201 * It always succeeds. The buffer may be dropped during processing
2202 * for congestion control or by the protocol layers.
2204 * This function may only be called from softirq context and interrupts
2205 * should be enabled.
2207 * Return values (usually ignored):
2208 * NET_RX_SUCCESS: no congestion
2209 * NET_RX_DROP: packet was dropped
2211 int netif_receive_skb(struct sk_buff *skb)
2213 struct packet_type *ptype, *pt_prev;
2214 struct net_device *orig_dev;
2215 struct net_device *null_or_orig;
2216 int ret = NET_RX_DROP;
2217 __be16 type;
2219 /* if we've gotten here through NAPI, check netpoll */
2220 if (netpoll_receive_skb(skb))
2221 return NET_RX_DROP;
2223 if (!skb->tstamp.tv64)
2224 net_timestamp(skb);
2226 if (!skb->iif)
2227 skb->iif = skb->dev->ifindex;
2229 null_or_orig = NULL;
2230 orig_dev = skb->dev;
2231 if (orig_dev->master) {
2232 if (skb_bond_should_drop(skb))
2233 null_or_orig = orig_dev; /* deliver only exact match */
2234 else
2235 skb->dev = orig_dev->master;
2238 __get_cpu_var(netdev_rx_stat).total++;
2240 skb_reset_network_header(skb);
2241 skb_reset_transport_header(skb);
2242 skb->mac_len = skb->network_header - skb->mac_header;
2244 pt_prev = NULL;
2246 rcu_read_lock();
2248 /* Don't receive packets in an exiting network namespace */
2249 if (!net_alive(dev_net(skb->dev)))
2250 goto out;
2252 #ifdef CONFIG_NET_CLS_ACT
2253 if (skb->tc_verd & TC_NCLS) {
2254 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2255 goto ncls;
2257 #endif
2259 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2260 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2261 ptype->dev == orig_dev) {
2262 if (pt_prev)
2263 ret = deliver_skb(skb, pt_prev, orig_dev);
2264 pt_prev = ptype;
2268 #ifdef CONFIG_NET_CLS_ACT
2269 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2270 if (!skb)
2271 goto out;
2272 ncls:
2273 #endif
2275 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2276 if (!skb)
2277 goto out;
2278 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2279 if (!skb)
2280 goto out;
2282 type = skb->protocol;
2283 list_for_each_entry_rcu(ptype,
2284 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2285 if (ptype->type == type &&
2286 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2287 ptype->dev == orig_dev)) {
2288 if (pt_prev)
2289 ret = deliver_skb(skb, pt_prev, orig_dev);
2290 pt_prev = ptype;
2294 if (pt_prev) {
2295 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2296 } else {
2297 kfree_skb(skb);
2298 /* Jamal, now you will not be able to escape explaining
2299 * to me how you were going to use this. :-)
2301 ret = NET_RX_DROP;
2304 out:
2305 rcu_read_unlock();
2306 return ret;
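/*
 * Editor's example (not part of dev.c): a minimal sketch of a protocol
 * handler that netif_receive_skb() above would deliver to.  The ethertype
 * value and the example_rcv()/example_pt names are hypothetical;
 * dev_add_pack()/dev_remove_pack() are the real registration interface.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* deliver_skb() took a reference for us; consume or free the skb. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt = {
	.type = __constant_htons(0x88b5),	/* assumed experimental ethertype */
	.func = example_rcv,
};

/* dev_add_pack(&example_pt) at init time, dev_remove_pack(&example_pt) on exit. */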
2309 /* Network device is going away, flush any packets still pending */
2310 static void flush_backlog(void *arg)
2312 struct net_device *dev = arg;
2313 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2314 struct sk_buff *skb, *tmp;
2316 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2317 if (skb->dev == dev) {
2318 __skb_unlink(skb, &queue->input_pkt_queue);
2319 kfree_skb(skb);
2323 static int process_backlog(struct napi_struct *napi, int quota)
2325 int work = 0;
2326 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2327 unsigned long start_time = jiffies;
2329 napi->weight = weight_p;
2330 do {
2331 struct sk_buff *skb;
2333 local_irq_disable();
2334 skb = __skb_dequeue(&queue->input_pkt_queue);
2335 if (!skb) {
2336 __napi_complete(napi);
2337 local_irq_enable();
2338 break;
2340 local_irq_enable();
2342 netif_receive_skb(skb);
2343 } while (++work < quota && jiffies == start_time);
2345 return work;
2349 * __napi_schedule - schedule for receive
2350 * @n: entry to schedule
2352 * The entry's receive function will be scheduled to run
2354 void __napi_schedule(struct napi_struct *n)
2356 unsigned long flags;
2358 local_irq_save(flags);
2359 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2360 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2361 local_irq_restore(flags);
2363 EXPORT_SYMBOL(__napi_schedule);
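/*
 * Editor's example (not part of dev.c): a hedged sketch of the driver side
 * of the NAPI contract that __napi_schedule()/net_rx_action() implement.
 * example_irq(), example_poll() and struct example_priv are hypothetical;
 * netif_napi_add(), napi_schedule() and napi_complete() are the real hooks.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* Mask further RX interrupts in the hardware here, then defer. */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* Pull up to 'budget' packets and hand each to netif_receive_skb(). */

	if (work < budget) {
		napi_complete(napi);
		/* Re-enable RX interrupts in the hardware here. */
	}
	return work;
}

/* registered once at probe time with:
 *	netif_napi_add(netdev, &priv->napi, example_poll, 64);
 */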
2366 static void net_rx_action(struct softirq_action *h)
2368 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2369 unsigned long start_time = jiffies;
2370 int budget = netdev_budget;
2371 void *have;
2373 local_irq_disable();
2375 while (!list_empty(list)) {
2376 struct napi_struct *n;
2377 int work, weight;
2379 /* If the softirq window is exhausted then punt.
2381 * Note that this is a slight policy change from the
2382 * previous NAPI code, which would allow up to 2
2383 * jiffies to pass before breaking out. The test
2384 * used to be "jiffies - start_time > 1".
2386 if (unlikely(budget <= 0 || jiffies != start_time))
2387 goto softnet_break;
2389 local_irq_enable();
2391 /* Even though interrupts have been re-enabled, this
2392 * access is safe because interrupts can only add new
2393 * entries to the tail of this list, and only ->poll()
2394 * calls can remove this head entry from the list.
2396 n = list_entry(list->next, struct napi_struct, poll_list);
2398 have = netpoll_poll_lock(n);
2400 weight = n->weight;
2402 /* This NAPI_STATE_SCHED test is for avoiding a race
2403 * with netpoll's poll_napi(). Only the entity which
2404 * obtains the lock and sees NAPI_STATE_SCHED set will
2405 * actually make the ->poll() call. Therefore we avoid
2406 * accidentally calling ->poll() when NAPI is not scheduled.
2408 work = 0;
2409 if (test_bit(NAPI_STATE_SCHED, &n->state))
2410 work = n->poll(n, weight);
2412 WARN_ON_ONCE(work > weight);
2414 budget -= work;
2416 local_irq_disable();
2418 /* Drivers must not modify the NAPI state if they
2419 * consume the entire weight. In such cases this code
2420 * still "owns" the NAPI instance and therefore can
2421 * move the instance around on the list at-will.
2423 if (unlikely(work == weight)) {
2424 if (unlikely(napi_disable_pending(n)))
2425 __napi_complete(n);
2426 else
2427 list_move_tail(&n->poll_list, list);
2430 netpoll_poll_unlock(have);
2432 out:
2433 local_irq_enable();
2435 #ifdef CONFIG_NET_DMA
2437 * There may not be any more sk_buffs coming right now, so push
2438 * any pending DMA copies to hardware
2440 if (!cpus_empty(net_dma.channel_mask)) {
2441 int chan_idx;
2442 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2443 struct dma_chan *chan = net_dma.channels[chan_idx];
2444 if (chan)
2445 dma_async_memcpy_issue_pending(chan);
2448 #endif
2450 return;
2452 softnet_break:
2453 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2454 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2455 goto out;
2458 static gifconf_func_t * gifconf_list [NPROTO];
2461 * register_gifconf - register a SIOCGIF handler
2462 * @family: Address family
2463 * @gifconf: Function handler
2465 * Register protocol dependent address dumping routines. The handler
2466 * that is passed must not be freed or reused until it has been replaced
2467 * by another handler.
2469 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2471 if (family >= NPROTO)
2472 return -EINVAL;
2473 gifconf_list[family] = gifconf;
2474 return 0;
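/*
 * Editor's example (not part of dev.c): how an address family hooks
 * SIOCGIFCONF through register_gifconf(); IPv4, for instance, registers
 * inet_gifconf this way.  example_gifconf() below is a hypothetical
 * placeholder that only shows the expected contract.
 */
static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/* Write one ifreq per address owned by 'dev' into 'buf' and return
	 * the number of bytes used; with buf == NULL, only report the space
	 * that would be needed. */
	return 0;
}

/* at protocol init time: register_gifconf(family, example_gifconf); */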
2479 * Map an interface index to its name (SIOCGIFNAME)
2483 * We need this ioctl for efficient implementation of the
2484 * if_indextoname() function required by the IPv6 API. Without
2485 * it, we would have to search all the interfaces to find a
2486 * match. --pb
2489 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2491 struct net_device *dev;
2492 struct ifreq ifr;
2495 * Fetch the caller's info block.
2498 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2499 return -EFAULT;
2501 read_lock(&dev_base_lock);
2502 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2503 if (!dev) {
2504 read_unlock(&dev_base_lock);
2505 return -ENODEV;
2508 strcpy(ifr.ifr_name, dev->name);
2509 read_unlock(&dev_base_lock);
2511 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2512 return -EFAULT;
2513 return 0;
2517 * Perform a SIOCGIFCONF call. This structure will change
2518 * size eventually, and there is nothing I can do about it.
2519 * Thus we will need a 'compatibility mode'.
2522 static int dev_ifconf(struct net *net, char __user *arg)
2524 struct ifconf ifc;
2525 struct net_device *dev;
2526 char __user *pos;
2527 int len;
2528 int total;
2529 int i;
2532 * Fetch the caller's info block.
2535 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2536 return -EFAULT;
2538 pos = ifc.ifc_buf;
2539 len = ifc.ifc_len;
2542 * Loop over the interfaces, and write an info block for each.
2545 total = 0;
2546 for_each_netdev(net, dev) {
2547 for (i = 0; i < NPROTO; i++) {
2548 if (gifconf_list[i]) {
2549 int done;
2550 if (!pos)
2551 done = gifconf_list[i](dev, NULL, 0);
2552 else
2553 done = gifconf_list[i](dev, pos + total,
2554 len - total);
2555 if (done < 0)
2556 return -EFAULT;
2557 total += done;
2563 * All done. Write the updated control block back to the caller.
2565 ifc.ifc_len = total;
2568 * Both BSD and Solaris return 0 here, so we do too.
2570 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2573 #ifdef CONFIG_PROC_FS
2575 * This is invoked by the /proc filesystem handler to display a device
2576 * in detail.
2578 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2579 __acquires(dev_base_lock)
2581 struct net *net = seq_file_net(seq);
2582 loff_t off;
2583 struct net_device *dev;
2585 read_lock(&dev_base_lock);
2586 if (!*pos)
2587 return SEQ_START_TOKEN;
2589 off = 1;
2590 for_each_netdev(net, dev)
2591 if (off++ == *pos)
2592 return dev;
2594 return NULL;
2597 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2599 struct net *net = seq_file_net(seq);
2600 ++*pos;
2601 return v == SEQ_START_TOKEN ?
2602 first_net_device(net) : next_net_device((struct net_device *)v);
2605 void dev_seq_stop(struct seq_file *seq, void *v)
2606 __releases(dev_base_lock)
2608 read_unlock(&dev_base_lock);
2611 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2613 struct net_device_stats *stats = dev->get_stats(dev);
2615 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2616 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2617 dev->name, stats->rx_bytes, stats->rx_packets,
2618 stats->rx_errors,
2619 stats->rx_dropped + stats->rx_missed_errors,
2620 stats->rx_fifo_errors,
2621 stats->rx_length_errors + stats->rx_over_errors +
2622 stats->rx_crc_errors + stats->rx_frame_errors,
2623 stats->rx_compressed, stats->multicast,
2624 stats->tx_bytes, stats->tx_packets,
2625 stats->tx_errors, stats->tx_dropped,
2626 stats->tx_fifo_errors, stats->collisions,
2627 stats->tx_carrier_errors +
2628 stats->tx_aborted_errors +
2629 stats->tx_window_errors +
2630 stats->tx_heartbeat_errors,
2631 stats->tx_compressed);
2635 * Called from the PROCfs module. This now uses the new arbitrary sized
2636 * /proc/net interface to create /proc/net/dev
2638 static int dev_seq_show(struct seq_file *seq, void *v)
2640 if (v == SEQ_START_TOKEN)
2641 seq_puts(seq, "Inter-| Receive "
2642 " | Transmit\n"
2643 " face |bytes packets errs drop fifo frame "
2644 "compressed multicast|bytes packets errs "
2645 "drop fifo colls carrier compressed\n");
2646 else
2647 dev_seq_printf_stats(seq, v);
2648 return 0;
2651 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2653 struct netif_rx_stats *rc = NULL;
2655 while (*pos < nr_cpu_ids)
2656 if (cpu_online(*pos)) {
2657 rc = &per_cpu(netdev_rx_stat, *pos);
2658 break;
2659 } else
2660 ++*pos;
2661 return rc;
2664 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2666 return softnet_get_online(pos);
2669 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2671 ++*pos;
2672 return softnet_get_online(pos);
2675 static void softnet_seq_stop(struct seq_file *seq, void *v)
2679 static int softnet_seq_show(struct seq_file *seq, void *v)
2681 struct netif_rx_stats *s = v;
2683 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2684 s->total, s->dropped, s->time_squeeze, 0,
2685 0, 0, 0, 0, /* was fastroute */
2686 s->cpu_collision );
2687 return 0;
2690 static const struct seq_operations dev_seq_ops = {
2691 .start = dev_seq_start,
2692 .next = dev_seq_next,
2693 .stop = dev_seq_stop,
2694 .show = dev_seq_show,
2697 static int dev_seq_open(struct inode *inode, struct file *file)
2699 return seq_open_net(inode, file, &dev_seq_ops,
2700 sizeof(struct seq_net_private));
2703 static const struct file_operations dev_seq_fops = {
2704 .owner = THIS_MODULE,
2705 .open = dev_seq_open,
2706 .read = seq_read,
2707 .llseek = seq_lseek,
2708 .release = seq_release_net,
2711 static const struct seq_operations softnet_seq_ops = {
2712 .start = softnet_seq_start,
2713 .next = softnet_seq_next,
2714 .stop = softnet_seq_stop,
2715 .show = softnet_seq_show,
2718 static int softnet_seq_open(struct inode *inode, struct file *file)
2720 return seq_open(file, &softnet_seq_ops);
2723 static const struct file_operations softnet_seq_fops = {
2724 .owner = THIS_MODULE,
2725 .open = softnet_seq_open,
2726 .read = seq_read,
2727 .llseek = seq_lseek,
2728 .release = seq_release,
2731 static void *ptype_get_idx(loff_t pos)
2733 struct packet_type *pt = NULL;
2734 loff_t i = 0;
2735 int t;
2737 list_for_each_entry_rcu(pt, &ptype_all, list) {
2738 if (i == pos)
2739 return pt;
2740 ++i;
2743 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2744 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2745 if (i == pos)
2746 return pt;
2747 ++i;
2750 return NULL;
2753 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2754 __acquires(RCU)
2756 rcu_read_lock();
2757 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2760 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2762 struct packet_type *pt;
2763 struct list_head *nxt;
2764 int hash;
2766 ++*pos;
2767 if (v == SEQ_START_TOKEN)
2768 return ptype_get_idx(0);
2770 pt = v;
2771 nxt = pt->list.next;
2772 if (pt->type == htons(ETH_P_ALL)) {
2773 if (nxt != &ptype_all)
2774 goto found;
2775 hash = 0;
2776 nxt = ptype_base[0].next;
2777 } else
2778 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2780 while (nxt == &ptype_base[hash]) {
2781 if (++hash >= PTYPE_HASH_SIZE)
2782 return NULL;
2783 nxt = ptype_base[hash].next;
2785 found:
2786 return list_entry(nxt, struct packet_type, list);
2789 static void ptype_seq_stop(struct seq_file *seq, void *v)
2790 __releases(RCU)
2792 rcu_read_unlock();
2795 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2797 #ifdef CONFIG_KALLSYMS
2798 unsigned long offset = 0, symsize;
2799 const char *symname;
2800 char *modname;
2801 char namebuf[128];
2803 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2804 &modname, namebuf);
2806 if (symname) {
2807 char *delim = ":";
2809 if (!modname)
2810 modname = delim = "";
2811 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2812 symname, offset);
2813 return;
2815 #endif
2817 seq_printf(seq, "[%p]", sym);
2820 static int ptype_seq_show(struct seq_file *seq, void *v)
2822 struct packet_type *pt = v;
2824 if (v == SEQ_START_TOKEN)
2825 seq_puts(seq, "Type Device Function\n");
2826 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2827 if (pt->type == htons(ETH_P_ALL))
2828 seq_puts(seq, "ALL ");
2829 else
2830 seq_printf(seq, "%04x", ntohs(pt->type));
2832 seq_printf(seq, " %-8s ",
2833 pt->dev ? pt->dev->name : "");
2834 ptype_seq_decode(seq, pt->func);
2835 seq_putc(seq, '\n');
2838 return 0;
2841 static const struct seq_operations ptype_seq_ops = {
2842 .start = ptype_seq_start,
2843 .next = ptype_seq_next,
2844 .stop = ptype_seq_stop,
2845 .show = ptype_seq_show,
2848 static int ptype_seq_open(struct inode *inode, struct file *file)
2850 return seq_open_net(inode, file, &ptype_seq_ops,
2851 sizeof(struct seq_net_private));
2854 static const struct file_operations ptype_seq_fops = {
2855 .owner = THIS_MODULE,
2856 .open = ptype_seq_open,
2857 .read = seq_read,
2858 .llseek = seq_lseek,
2859 .release = seq_release_net,
2863 static int __net_init dev_proc_net_init(struct net *net)
2865 int rc = -ENOMEM;
2867 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2868 goto out;
2869 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2870 goto out_dev;
2871 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2872 goto out_softnet;
2874 if (wext_proc_init(net))
2875 goto out_ptype;
2876 rc = 0;
2877 out:
2878 return rc;
2879 out_ptype:
2880 proc_net_remove(net, "ptype");
2881 out_softnet:
2882 proc_net_remove(net, "softnet_stat");
2883 out_dev:
2884 proc_net_remove(net, "dev");
2885 goto out;
2888 static void __net_exit dev_proc_net_exit(struct net *net)
2890 wext_proc_exit(net);
2892 proc_net_remove(net, "ptype");
2893 proc_net_remove(net, "softnet_stat");
2894 proc_net_remove(net, "dev");
2897 static struct pernet_operations __net_initdata dev_proc_ops = {
2898 .init = dev_proc_net_init,
2899 .exit = dev_proc_net_exit,
2902 static int __init dev_proc_init(void)
2904 return register_pernet_subsys(&dev_proc_ops);
2906 #else
2907 #define dev_proc_init() 0
2908 #endif /* CONFIG_PROC_FS */
2912 * netdev_set_master - set up master/slave pair
2913 * @slave: slave device
2914 * @master: new master device
2916 * Changes the master device of the slave. Pass %NULL to break the
2917 * bonding. The caller must hold the RTNL semaphore. On a failure
2918 * a negative errno code is returned. On success the reference counts
2919 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2920 * function returns zero.
2922 int netdev_set_master(struct net_device *slave, struct net_device *master)
2924 struct net_device *old = slave->master;
2926 ASSERT_RTNL();
2928 if (master) {
2929 if (old)
2930 return -EBUSY;
2931 dev_hold(master);
2934 slave->master = master;
2936 synchronize_net();
2938 if (old)
2939 dev_put(old);
2941 if (master)
2942 slave->flags |= IFF_SLAVE;
2943 else
2944 slave->flags &= ~IFF_SLAVE;
2946 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2947 return 0;
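/*
 * Editor's example (not part of dev.c): a hedged sketch of the master/slave
 * pairing described above, in the style of the bonding driver.  The
 * example_enslave() wrapper is hypothetical; the rtnl_lock() requirement
 * comes from the ASSERT_RTNL() in netdev_set_master().
 */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave_dev, bond_dev);
	rtnl_unlock();
	return err;	/* pass NULL as the master later to break the pairing */
}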
2950 static int __dev_set_promiscuity(struct net_device *dev, int inc)
2952 unsigned short old_flags = dev->flags;
2954 ASSERT_RTNL();
2956 dev->flags |= IFF_PROMISC;
2957 dev->promiscuity += inc;
2958 if (dev->promiscuity == 0) {
2960 * Avoid overflow.
2961 * If inc causes overflow, untouch promisc and return error.
2963 if (inc < 0)
2964 dev->flags &= ~IFF_PROMISC;
2965 else {
2966 dev->promiscuity -= inc;
2967 printk(KERN_WARNING "%s: promiscuity counter would overflow, "
2968 "set promiscuity failed; the promiscuity feature "
2969 "of this device might be broken.\n", dev->name);
2970 return -EOVERFLOW;
2973 if (dev->flags != old_flags) {
2974 printk(KERN_INFO "device %s %s promiscuous mode\n",
2975 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2976 "left");
2977 if (audit_enabled)
2978 audit_log(current->audit_context, GFP_ATOMIC,
2979 AUDIT_ANOM_PROMISCUOUS,
2980 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2981 dev->name, (dev->flags & IFF_PROMISC),
2982 (old_flags & IFF_PROMISC),
2983 audit_get_loginuid(current),
2984 current->uid, current->gid,
2985 audit_get_sessionid(current));
2987 if (dev->change_rx_flags)
2988 dev->change_rx_flags(dev, IFF_PROMISC);
2990 return 0;
2994 * dev_set_promiscuity - update promiscuity count on a device
2995 * @dev: device
2996 * @inc: modifier
2998 * Add or remove promiscuity from a device. While the count in the device
2999 * remains above zero the interface remains promiscuous. Once it hits zero
3000 * the device reverts back to normal filtering operation. A negative inc
3001 * value is used to drop promiscuity on the device.
3002 * Return 0 if successful or a negative errno code on error.
3004 int dev_set_promiscuity(struct net_device *dev, int inc)
3006 unsigned short old_flags = dev->flags;
3007 int err;
3009 err = __dev_set_promiscuity(dev, inc);
3010 if (err < 0)
3011 return err;
3012 if (dev->flags != old_flags)
3013 dev_set_rx_mode(dev);
3014 return err;
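/*
 * Editor's example (not part of dev.c): typical use of the promiscuity
 * counter by a packet-capture style user.  example_start_capture() is a
 * hypothetical wrapper; the rtnl requirement comes from the ASSERT_RTNL()
 * in __dev_set_promiscuity().
 */
static int example_start_capture(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();
	return err;	/* balanced later with dev_set_promiscuity(dev, -1) */
}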
3018 * dev_set_allmulti - update allmulti count on a device
3019 * @dev: device
3020 * @inc: modifier
3022 * Add or remove reception of all multicast frames on a device. While the
3023 * count in the device remains above zero the interface remains listening
3024 * to all multicast frames. Once it hits zero the device reverts back to normal
3025 * filtering operation. A negative @inc value is used to drop the counter
3026 * when releasing a resource needing all multicasts.
3027 * Return 0 if successful or a negative errno code on error.
3030 int dev_set_allmulti(struct net_device *dev, int inc)
3032 unsigned short old_flags = dev->flags;
3034 ASSERT_RTNL();
3036 dev->flags |= IFF_ALLMULTI;
3037 dev->allmulti += inc;
3038 if (dev->allmulti == 0) {
3040 * Avoid overflow.
3041 * If inc causes overflow, untouch allmulti and return error.
3043 if (inc < 0)
3044 dev->flags &= ~IFF_ALLMULTI;
3045 else {
3046 dev->allmulti -= inc;
3047 printk(KERN_WARNING "%s: allmulti counter would overflow, "
3048 "set allmulti failed; the allmulti feature of "
3049 "this device might be broken.\n", dev->name);
3050 return -EOVERFLOW;
3053 if (dev->flags ^ old_flags) {
3054 if (dev->change_rx_flags)
3055 dev->change_rx_flags(dev, IFF_ALLMULTI);
3056 dev_set_rx_mode(dev);
3058 return 0;
3062 * Upload unicast and multicast address lists to device and
3063 * configure RX filtering. When the device doesn't support unicast
3064 * filtering it is put in promiscuous mode while unicast addresses
3065 * are present.
3067 void __dev_set_rx_mode(struct net_device *dev)
3069 /* dev_open will call this function so the list will stay sane. */
3070 if (!(dev->flags&IFF_UP))
3071 return;
3073 if (!netif_device_present(dev))
3074 return;
3076 if (dev->set_rx_mode)
3077 dev->set_rx_mode(dev);
3078 else {
3079 /* Unicast address changes may only happen under the rtnl,
3080 * therefore calling __dev_set_promiscuity here is safe.
3082 if (dev->uc_count > 0 && !dev->uc_promisc) {
3083 __dev_set_promiscuity(dev, 1);
3084 dev->uc_promisc = 1;
3085 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3086 __dev_set_promiscuity(dev, -1);
3087 dev->uc_promisc = 0;
3090 if (dev->set_multicast_list)
3091 dev->set_multicast_list(dev);
3095 void dev_set_rx_mode(struct net_device *dev)
3097 netif_addr_lock_bh(dev);
3098 __dev_set_rx_mode(dev);
3099 netif_addr_unlock_bh(dev);
3102 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3103 void *addr, int alen, int glbl)
3105 struct dev_addr_list *da;
3107 for (; (da = *list) != NULL; list = &da->next) {
3108 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3109 alen == da->da_addrlen) {
3110 if (glbl) {
3111 int old_glbl = da->da_gusers;
3112 da->da_gusers = 0;
3113 if (old_glbl == 0)
3114 break;
3116 if (--da->da_users)
3117 return 0;
3119 *list = da->next;
3120 kfree(da);
3121 (*count)--;
3122 return 0;
3125 return -ENOENT;
3128 int __dev_addr_add(struct dev_addr_list **list, int *count,
3129 void *addr, int alen, int glbl)
3131 struct dev_addr_list *da;
3133 for (da = *list; da != NULL; da = da->next) {
3134 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3135 da->da_addrlen == alen) {
3136 if (glbl) {
3137 int old_glbl = da->da_gusers;
3138 da->da_gusers = 1;
3139 if (old_glbl)
3140 return 0;
3142 da->da_users++;
3143 return 0;
3147 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3148 if (da == NULL)
3149 return -ENOMEM;
3150 memcpy(da->da_addr, addr, alen);
3151 da->da_addrlen = alen;
3152 da->da_users = 1;
3153 da->da_gusers = glbl ? 1 : 0;
3154 da->next = *list;
3155 *list = da;
3156 (*count)++;
3157 return 0;
3161 * dev_unicast_delete - Release secondary unicast address.
3162 * @dev: device
3163 * @addr: address to delete
3164 * @alen: length of @addr
3166 * Release reference to a secondary unicast address and remove it
3167 * from the device if the reference count drops to zero.
3169 * The caller must hold the rtnl_mutex.
3171 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3173 int err;
3175 ASSERT_RTNL();
3177 netif_addr_lock_bh(dev);
3178 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3179 if (!err)
3180 __dev_set_rx_mode(dev);
3181 netif_addr_unlock_bh(dev);
3182 return err;
3184 EXPORT_SYMBOL(dev_unicast_delete);
3187 * dev_unicast_add - add a secondary unicast address
3188 * @dev: device
3189 * @addr: address to add
3190 * @alen: length of @addr
3192 * Add a secondary unicast address to the device or increase
3193 * the reference count if it already exists.
3195 * The caller must hold the rtnl_mutex.
3197 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3199 int err;
3201 ASSERT_RTNL();
3203 netif_addr_lock_bh(dev);
3204 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3205 if (!err)
3206 __dev_set_rx_mode(dev);
3207 netif_addr_unlock_bh(dev);
3208 return err;
3210 EXPORT_SYMBOL(dev_unicast_add);
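/*
 * Editor's example (not part of dev.c): adding a secondary unicast (MAC)
 * address on behalf of a virtual interface.  example_claim_mac() is
 * hypothetical; dev_unicast_add()/dev_unicast_delete() are the interfaces
 * documented above, and both must run under the rtnl lock.
 */
#include <linux/if_ether.h>

static int example_claim_mac(struct net_device *lowerdev,
			     const u8 mac[ETH_ALEN])
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(lowerdev, (void *)mac, ETH_ALEN);
	rtnl_unlock();
	return err;
}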
3212 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3213 struct dev_addr_list **from, int *from_count)
3215 struct dev_addr_list *da, *next;
3216 int err = 0;
3218 da = *from;
3219 while (da != NULL) {
3220 next = da->next;
3221 if (!da->da_synced) {
3222 err = __dev_addr_add(to, to_count,
3223 da->da_addr, da->da_addrlen, 0);
3224 if (err < 0)
3225 break;
3226 da->da_synced = 1;
3227 da->da_users++;
3228 } else if (da->da_users == 1) {
3229 __dev_addr_delete(to, to_count,
3230 da->da_addr, da->da_addrlen, 0);
3231 __dev_addr_delete(from, from_count,
3232 da->da_addr, da->da_addrlen, 0);
3234 da = next;
3236 return err;
3239 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3240 struct dev_addr_list **from, int *from_count)
3242 struct dev_addr_list *da, *next;
3244 da = *from;
3245 while (da != NULL) {
3246 next = da->next;
3247 if (da->da_synced) {
3248 __dev_addr_delete(to, to_count,
3249 da->da_addr, da->da_addrlen, 0);
3250 da->da_synced = 0;
3251 __dev_addr_delete(from, from_count,
3252 da->da_addr, da->da_addrlen, 0);
3254 da = next;
3259 * dev_unicast_sync - Synchronize device's unicast list to another device
3260 * @to: destination device
3261 * @from: source device
3263 * Add newly added addresses to the destination device and release
3264 * addresses that have no users left. The source device must be
3265 * locked by netif_tx_lock_bh.
3267 * This function is intended to be called from the dev->set_rx_mode
3268 * function of layered software devices.
3270 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3272 int err = 0;
3274 netif_addr_lock_bh(to);
3275 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3276 &from->uc_list, &from->uc_count);
3277 if (!err)
3278 __dev_set_rx_mode(to);
3279 netif_addr_unlock_bh(to);
3280 return err;
3282 EXPORT_SYMBOL(dev_unicast_sync);
3285 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3286 * @to: destination device
3287 * @from: source device
3289 * Remove all addresses that were added to the destination device by
3290 * dev_unicast_sync(). This function is intended to be called from the
3291 * dev->stop function of layered software devices.
3293 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3295 netif_addr_lock_bh(from);
3296 netif_addr_lock(to);
3298 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3299 &from->uc_list, &from->uc_count);
3300 __dev_set_rx_mode(to);
3302 netif_addr_unlock(to);
3303 netif_addr_unlock_bh(from);
3305 EXPORT_SYMBOL(dev_unicast_unsync);
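/*
 * Editor's example (not part of dev.c): how a layered software device
 * (macvlan-style) would use dev_unicast_sync()/dev_unicast_unsync() from
 * its set_rx_mode and stop callbacks.  struct example_vif and the two
 * callbacks are hypothetical.
 */
struct example_vif {
	struct net_device *lowerdev;
};

static void example_vif_set_rx_mode(struct net_device *vdev)
{
	struct example_vif *vif = netdev_priv(vdev);

	dev_unicast_sync(vif->lowerdev, vdev);
}

static int example_vif_stop(struct net_device *vdev)
{
	struct example_vif *vif = netdev_priv(vdev);

	dev_unicast_unsync(vif->lowerdev, vdev);
	return 0;
}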
3307 static void __dev_addr_discard(struct dev_addr_list **list)
3309 struct dev_addr_list *tmp;
3311 while (*list != NULL) {
3312 tmp = *list;
3313 *list = tmp->next;
3314 if (tmp->da_users > tmp->da_gusers)
3315 printk("__dev_addr_discard: address leakage! "
3316 "da_users=%d\n", tmp->da_users);
3317 kfree(tmp);
3321 static void dev_addr_discard(struct net_device *dev)
3323 netif_addr_lock_bh(dev);
3325 __dev_addr_discard(&dev->uc_list);
3326 dev->uc_count = 0;
3328 __dev_addr_discard(&dev->mc_list);
3329 dev->mc_count = 0;
3331 netif_addr_unlock_bh(dev);
3335 * dev_get_flags - get flags reported to userspace
3336 * @dev: device
3338 * Get the combination of flag bits exported through APIs to userspace.
3340 unsigned dev_get_flags(const struct net_device *dev)
3342 unsigned flags;
3344 flags = (dev->flags & ~(IFF_PROMISC |
3345 IFF_ALLMULTI |
3346 IFF_RUNNING |
3347 IFF_LOWER_UP |
3348 IFF_DORMANT)) |
3349 (dev->gflags & (IFF_PROMISC |
3350 IFF_ALLMULTI));
3352 if (netif_running(dev)) {
3353 if (netif_oper_up(dev))
3354 flags |= IFF_RUNNING;
3355 if (netif_carrier_ok(dev))
3356 flags |= IFF_LOWER_UP;
3357 if (netif_dormant(dev))
3358 flags |= IFF_DORMANT;
3361 return flags;
3365 * dev_change_flags - change device settings
3366 * @dev: device
3367 * @flags: device state flags
3369 * Change settings on a device based on the supplied state flags. The flags are
3370 * in the userspace exported format.
3372 int dev_change_flags(struct net_device *dev, unsigned flags)
3374 int ret, changes;
3375 int old_flags = dev->flags;
3377 ASSERT_RTNL();
3380 * Set the flags on our device.
3383 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3384 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3385 IFF_AUTOMEDIA)) |
3386 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3387 IFF_ALLMULTI));
3390 * Load in the correct multicast list now the flags have changed.
3393 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3394 dev->change_rx_flags(dev, IFF_MULTICAST);
3396 dev_set_rx_mode(dev);
3399 * Have we downed the interface? We handle IFF_UP ourselves
3400 * according to user attempts to set it, rather than blindly
3401 * setting it.
3404 ret = 0;
3405 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3406 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3408 if (!ret)
3409 dev_set_rx_mode(dev);
3412 if (dev->flags & IFF_UP &&
3413 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3414 IFF_VOLATILE)))
3415 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3417 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3418 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3419 dev->gflags ^= IFF_PROMISC;
3420 dev_set_promiscuity(dev, inc);
3423 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3424 is important. Some (broken) drivers set IFF_PROMISC when
3425 IFF_ALLMULTI is requested, without asking us and without reporting it.
3427 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3428 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3429 dev->gflags ^= IFF_ALLMULTI;
3430 dev_set_allmulti(dev, inc);
3433 /* Exclude state transition flags, already notified */
3434 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3435 if (changes)
3436 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3438 return ret;
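/*
 * Editor's example (not part of dev.c): bringing an interface up from
 * kernel code via dev_change_flags(), equivalent to what SIOCSIFFLAGS
 * does.  example_bring_up() is a hypothetical wrapper; the rtnl lock is
 * required by the ASSERT_RTNL() above.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}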
3442 * dev_set_mtu - Change maximum transfer unit
3443 * @dev: device
3444 * @new_mtu: new transfer unit
3446 * Change the maximum transfer size of the network device.
3448 int dev_set_mtu(struct net_device *dev, int new_mtu)
3450 int err;
3452 if (new_mtu == dev->mtu)
3453 return 0;
3455 /* MTU must be positive. */
3456 if (new_mtu < 0)
3457 return -EINVAL;
3459 if (!netif_device_present(dev))
3460 return -ENODEV;
3462 err = 0;
3463 if (dev->change_mtu)
3464 err = dev->change_mtu(dev, new_mtu);
3465 else
3466 dev->mtu = new_mtu;
3467 if (!err && dev->flags & IFF_UP)
3468 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3469 return err;
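/*
 * Editor's example (not part of dev.c): changing the MTU from kernel code,
 * mirroring the SIOCSIFMTU path.  example_set_jumbo() and the 9000-byte
 * value are assumptions; in-tree callers take the rtnl lock around
 * dev_set_mtu() just as dev_ifsioc() does.
 */
static int example_set_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* fails unless the driver allows it */
	rtnl_unlock();
	return err;
}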
3473 * dev_set_mac_address - Change Media Access Control Address
3474 * @dev: device
3475 * @sa: new address
3477 * Change the hardware (MAC) address of the device
3479 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3481 int err;
3483 if (!dev->set_mac_address)
3484 return -EOPNOTSUPP;
3485 if (sa->sa_family != dev->type)
3486 return -EINVAL;
3487 if (!netif_device_present(dev))
3488 return -ENODEV;
3489 err = dev->set_mac_address(dev, sa);
3490 if (!err)
3491 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3492 return err;
3496 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3498 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3500 int err;
3501 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3503 if (!dev)
3504 return -ENODEV;
3506 switch (cmd) {
3507 case SIOCGIFFLAGS: /* Get interface flags */
3508 ifr->ifr_flags = dev_get_flags(dev);
3509 return 0;
3511 case SIOCGIFMETRIC: /* Get the metric on the interface
3512 (currently unused) */
3513 ifr->ifr_metric = 0;
3514 return 0;
3516 case SIOCGIFMTU: /* Get the MTU of a device */
3517 ifr->ifr_mtu = dev->mtu;
3518 return 0;
3520 case SIOCGIFHWADDR:
3521 if (!dev->addr_len)
3522 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3523 else
3524 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3525 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3526 ifr->ifr_hwaddr.sa_family = dev->type;
3527 return 0;
3529 case SIOCGIFSLAVE:
3530 err = -EINVAL;
3531 break;
3533 case SIOCGIFMAP:
3534 ifr->ifr_map.mem_start = dev->mem_start;
3535 ifr->ifr_map.mem_end = dev->mem_end;
3536 ifr->ifr_map.base_addr = dev->base_addr;
3537 ifr->ifr_map.irq = dev->irq;
3538 ifr->ifr_map.dma = dev->dma;
3539 ifr->ifr_map.port = dev->if_port;
3540 return 0;
3542 case SIOCGIFINDEX:
3543 ifr->ifr_ifindex = dev->ifindex;
3544 return 0;
3546 case SIOCGIFTXQLEN:
3547 ifr->ifr_qlen = dev->tx_queue_len;
3548 return 0;
3550 default:
3551 /* dev_ioctl() should ensure this case
3552 * is never reached
3554 WARN_ON(1);
3555 err = -EINVAL;
3556 break;
3559 return err;
3563 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3565 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3567 int err;
3568 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3570 if (!dev)
3571 return -ENODEV;
3573 switch (cmd) {
3574 case SIOCSIFFLAGS: /* Set interface flags */
3575 return dev_change_flags(dev, ifr->ifr_flags);
3577 case SIOCSIFMETRIC: /* Set the metric on the interface
3578 (currently unused) */
3579 return -EOPNOTSUPP;
3581 case SIOCSIFMTU: /* Set the MTU of a device */
3582 return dev_set_mtu(dev, ifr->ifr_mtu);
3584 case SIOCSIFHWADDR:
3585 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3587 case SIOCSIFHWBROADCAST:
3588 if (ifr->ifr_hwaddr.sa_family != dev->type)
3589 return -EINVAL;
3590 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3591 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3592 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3593 return 0;
3595 case SIOCSIFMAP:
3596 if (dev->set_config) {
3597 if (!netif_device_present(dev))
3598 return -ENODEV;
3599 return dev->set_config(dev, &ifr->ifr_map);
3601 return -EOPNOTSUPP;
3603 case SIOCADDMULTI:
3604 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3605 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3606 return -EINVAL;
3607 if (!netif_device_present(dev))
3608 return -ENODEV;
3609 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3610 dev->addr_len, 1);
3612 case SIOCDELMULTI:
3613 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3614 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3615 return -EINVAL;
3616 if (!netif_device_present(dev))
3617 return -ENODEV;
3618 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3619 dev->addr_len, 1);
3621 case SIOCSIFTXQLEN:
3622 if (ifr->ifr_qlen < 0)
3623 return -EINVAL;
3624 dev->tx_queue_len = ifr->ifr_qlen;
3625 return 0;
3627 case SIOCSIFNAME:
3628 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3629 return dev_change_name(dev, ifr->ifr_newname);
3632 * Unknown or private ioctl
3635 default:
3636 if ((cmd >= SIOCDEVPRIVATE &&
3637 cmd <= SIOCDEVPRIVATE + 15) ||
3638 cmd == SIOCBONDENSLAVE ||
3639 cmd == SIOCBONDRELEASE ||
3640 cmd == SIOCBONDSETHWADDR ||
3641 cmd == SIOCBONDSLAVEINFOQUERY ||
3642 cmd == SIOCBONDINFOQUERY ||
3643 cmd == SIOCBONDCHANGEACTIVE ||
3644 cmd == SIOCGMIIPHY ||
3645 cmd == SIOCGMIIREG ||
3646 cmd == SIOCSMIIREG ||
3647 cmd == SIOCBRADDIF ||
3648 cmd == SIOCBRDELIF ||
3649 cmd == SIOCWANDEV) {
3650 err = -EOPNOTSUPP;
3651 if (dev->do_ioctl) {
3652 if (netif_device_present(dev))
3653 err = dev->do_ioctl(dev, ifr,
3654 cmd);
3655 else
3656 err = -ENODEV;
3658 } else
3659 err = -EINVAL;
3662 return err;
3666 * This function handles all "interface"-type I/O control requests. The actual
3667 * 'doing' part of this is dev_ifsioc above.
3671 * dev_ioctl - network device ioctl
3672 * @net: the applicable net namespace
3673 * @cmd: command to issue
3674 * @arg: pointer to a struct ifreq in user space
3676 * Issue ioctl functions to devices. This is normally called by the
3677 * user space syscall interfaces but can sometimes be useful for
3678 * other purposes. The return value is the return from the syscall if
3679 * positive or a negative errno code on error.
3682 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3684 struct ifreq ifr;
3685 int ret;
3686 char *colon;
3688 /* One special case: SIOCGIFCONF takes ifconf argument
3689 and requires shared lock, because it sleeps writing
3690 to user space.
3693 if (cmd == SIOCGIFCONF) {
3694 rtnl_lock();
3695 ret = dev_ifconf(net, (char __user *) arg);
3696 rtnl_unlock();
3697 return ret;
3699 if (cmd == SIOCGIFNAME)
3700 return dev_ifname(net, (struct ifreq __user *)arg);
3702 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3703 return -EFAULT;
3705 ifr.ifr_name[IFNAMSIZ-1] = 0;
3707 colon = strchr(ifr.ifr_name, ':');
3708 if (colon)
3709 *colon = 0;
3712 * See which interface the caller is talking about.
3715 switch (cmd) {
3717 * These ioctl calls:
3718 * - can be done by all.
3719 * - atomic and do not require locking.
3720 * - return a value
3722 case SIOCGIFFLAGS:
3723 case SIOCGIFMETRIC:
3724 case SIOCGIFMTU:
3725 case SIOCGIFHWADDR:
3726 case SIOCGIFSLAVE:
3727 case SIOCGIFMAP:
3728 case SIOCGIFINDEX:
3729 case SIOCGIFTXQLEN:
3730 dev_load(net, ifr.ifr_name);
3731 read_lock(&dev_base_lock);
3732 ret = dev_ifsioc_locked(net, &ifr, cmd);
3733 read_unlock(&dev_base_lock);
3734 if (!ret) {
3735 if (colon)
3736 *colon = ':';
3737 if (copy_to_user(arg, &ifr,
3738 sizeof(struct ifreq)))
3739 ret = -EFAULT;
3741 return ret;
3743 case SIOCETHTOOL:
3744 dev_load(net, ifr.ifr_name);
3745 rtnl_lock();
3746 ret = dev_ethtool(net, &ifr);
3747 rtnl_unlock();
3748 if (!ret) {
3749 if (colon)
3750 *colon = ':';
3751 if (copy_to_user(arg, &ifr,
3752 sizeof(struct ifreq)))
3753 ret = -EFAULT;
3755 return ret;
3758 * These ioctl calls:
3759 * - require superuser power.
3760 * - require strict serialization.
3761 * - return a value
3763 case SIOCGMIIPHY:
3764 case SIOCGMIIREG:
3765 case SIOCSIFNAME:
3766 if (!capable(CAP_NET_ADMIN))
3767 return -EPERM;
3768 dev_load(net, ifr.ifr_name);
3769 rtnl_lock();
3770 ret = dev_ifsioc(net, &ifr, cmd);
3771 rtnl_unlock();
3772 if (!ret) {
3773 if (colon)
3774 *colon = ':';
3775 if (copy_to_user(arg, &ifr,
3776 sizeof(struct ifreq)))
3777 ret = -EFAULT;
3779 return ret;
3782 * These ioctl calls:
3783 * - require superuser power.
3784 * - require strict serialization.
3785 * - do not return a value
3787 case SIOCSIFFLAGS:
3788 case SIOCSIFMETRIC:
3789 case SIOCSIFMTU:
3790 case SIOCSIFMAP:
3791 case SIOCSIFHWADDR:
3792 case SIOCSIFSLAVE:
3793 case SIOCADDMULTI:
3794 case SIOCDELMULTI:
3795 case SIOCSIFHWBROADCAST:
3796 case SIOCSIFTXQLEN:
3797 case SIOCSMIIREG:
3798 case SIOCBONDENSLAVE:
3799 case SIOCBONDRELEASE:
3800 case SIOCBONDSETHWADDR:
3801 case SIOCBONDCHANGEACTIVE:
3802 case SIOCBRADDIF:
3803 case SIOCBRDELIF:
3804 if (!capable(CAP_NET_ADMIN))
3805 return -EPERM;
3806 /* fall through */
3807 case SIOCBONDSLAVEINFOQUERY:
3808 case SIOCBONDINFOQUERY:
3809 dev_load(net, ifr.ifr_name);
3810 rtnl_lock();
3811 ret = dev_ifsioc(net, &ifr, cmd);
3812 rtnl_unlock();
3813 return ret;
3815 case SIOCGIFMEM:
3816 /* Get the per device memory space. We can add this but
3817 * currently do not support it */
3818 case SIOCSIFMEM:
3819 /* Set the per device memory buffer space.
3820 * Not applicable in our case */
3821 case SIOCSIFLINK:
3822 return -EINVAL;
3825 * Unknown or private ioctl.
3827 default:
3828 if (cmd == SIOCWANDEV ||
3829 (cmd >= SIOCDEVPRIVATE &&
3830 cmd <= SIOCDEVPRIVATE + 15)) {
3831 dev_load(net, ifr.ifr_name);
3832 rtnl_lock();
3833 ret = dev_ifsioc(net, &ifr, cmd);
3834 rtnl_unlock();
3835 if (!ret && copy_to_user(arg, &ifr,
3836 sizeof(struct ifreq)))
3837 ret = -EFAULT;
3838 return ret;
3840 /* Take care of Wireless Extensions */
3841 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3842 return wext_handle_ioctl(net, &ifr, cmd, arg);
3843 return -EINVAL;
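/*
 * Editor's example (not part of dev.c): the user-space side of the request
 * path that dev_ioctl() serves.  This queries the MTU of "eth0" (an assumed
 * interface name) over an AF_INET ioctl socket; it is ordinary user code,
 * not kernel code.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}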
3849 * dev_new_index - allocate an ifindex
3850 * @net: the applicable net namespace
3852 * Returns a suitable unique value for a new device interface
3853 * number. The caller must hold the rtnl semaphore or the
3854 * dev_base_lock to be sure it remains unique.
3856 static int dev_new_index(struct net *net)
3858 static int ifindex;
3859 for (;;) {
3860 if (++ifindex <= 0)
3861 ifindex = 1;
3862 if (!__dev_get_by_index(net, ifindex))
3863 return ifindex;
3867 /* Delayed registration/unregistration */
3868 static DEFINE_SPINLOCK(net_todo_list_lock);
3869 static LIST_HEAD(net_todo_list);
3871 static void net_set_todo(struct net_device *dev)
3873 spin_lock(&net_todo_list_lock);
3874 list_add_tail(&dev->todo_list, &net_todo_list);
3875 spin_unlock(&net_todo_list_lock);
3878 static void rollback_registered(struct net_device *dev)
3880 BUG_ON(dev_boot_phase);
3881 ASSERT_RTNL();
3883 /* Some devices call this without ever having registered, as part of initialization unwind. */
3884 if (dev->reg_state == NETREG_UNINITIALIZED) {
3885 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3886 "was registered\n", dev->name, dev);
3888 WARN_ON(1);
3889 return;
3892 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3894 /* If device is running, close it first. */
3895 dev_close(dev);
3897 /* And unlink it from device chain. */
3898 unlist_netdevice(dev);
3900 dev->reg_state = NETREG_UNREGISTERING;
3902 synchronize_net();
3904 /* Shutdown queueing discipline. */
3905 dev_shutdown(dev);
3908 /* Notify protocols that we are about to destroy
3909 this device. They should clean up all their state.
3911 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3914 * Flush the unicast and multicast chains
3916 dev_addr_discard(dev);
3918 if (dev->uninit)
3919 dev->uninit(dev);
3921 /* Notifier chain MUST detach us from master device. */
3922 WARN_ON(dev->master);
3924 /* Remove entries from kobject tree */
3925 netdev_unregister_kobject(dev);
3927 synchronize_net();
3929 dev_put(dev);
3932 static void __netdev_init_queue_locks_one(struct net_device *dev,
3933 struct netdev_queue *dev_queue,
3934 void *_unused)
3936 spin_lock_init(&dev_queue->_xmit_lock);
3937 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3938 dev_queue->xmit_lock_owner = -1;
3941 static void netdev_init_queue_locks(struct net_device *dev)
3943 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3944 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3948 * register_netdevice - register a network device
3949 * @dev: device to register
3951 * Take a completed network device structure and add it to the kernel
3952 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3953 * chain. 0 is returned on success. A negative errno code is returned
3954 * on a failure to set up the device, or if the name is a duplicate.
3956 * Callers must hold the rtnl semaphore. You may want
3957 * register_netdev() instead of this.
3959 * BUGS:
3960 * The locking appears insufficient to guarantee two parallel registers
3961 * will not get the same name.
3964 int register_netdevice(struct net_device *dev)
3966 struct hlist_head *head;
3967 struct hlist_node *p;
3968 int ret;
3969 struct net *net;
3971 BUG_ON(dev_boot_phase);
3972 ASSERT_RTNL();
3974 might_sleep();
3976 /* When net_devices are persistent, this will be fatal. */
3977 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3978 BUG_ON(!dev_net(dev));
3979 net = dev_net(dev);
3981 spin_lock_init(&dev->addr_list_lock);
3982 netdev_set_addr_lockdep_class(dev);
3983 netdev_init_queue_locks(dev);
3985 dev->iflink = -1;
3987 /* Init, if this function is available */
3988 if (dev->init) {
3989 ret = dev->init(dev);
3990 if (ret) {
3991 if (ret > 0)
3992 ret = -EIO;
3993 goto out;
3997 if (!dev_valid_name(dev->name)) {
3998 ret = -EINVAL;
3999 goto err_uninit;
4002 dev->ifindex = dev_new_index(net);
4003 if (dev->iflink == -1)
4004 dev->iflink = dev->ifindex;
4006 /* Check for existence of name */
4007 head = dev_name_hash(net, dev->name);
4008 hlist_for_each(p, head) {
4009 struct net_device *d
4010 = hlist_entry(p, struct net_device, name_hlist);
4011 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4012 ret = -EEXIST;
4013 goto err_uninit;
4017 /* Fix illegal checksum combinations */
4018 if ((dev->features & NETIF_F_HW_CSUM) &&
4019 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4020 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4021 dev->name);
4022 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4025 if ((dev->features & NETIF_F_NO_CSUM) &&
4026 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4027 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4028 dev->name);
4029 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4033 /* Fix illegal SG+CSUM combinations. */
4034 if ((dev->features & NETIF_F_SG) &&
4035 !(dev->features & NETIF_F_ALL_CSUM)) {
4036 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
4037 dev->name);
4038 dev->features &= ~NETIF_F_SG;
4041 /* TSO requires that SG is present as well. */
4042 if ((dev->features & NETIF_F_TSO) &&
4043 !(dev->features & NETIF_F_SG)) {
4044 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
4045 dev->name);
4046 dev->features &= ~NETIF_F_TSO;
4048 if (dev->features & NETIF_F_UFO) {
4049 if (!(dev->features & NETIF_F_HW_CSUM)) {
4050 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4051 "NETIF_F_HW_CSUM feature.\n",
4052 dev->name);
4053 dev->features &= ~NETIF_F_UFO;
4055 if (!(dev->features & NETIF_F_SG)) {
4056 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4057 "NETIF_F_SG feature.\n",
4058 dev->name);
4059 dev->features &= ~NETIF_F_UFO;
4063 /* Enable software GSO if SG is supported. */
4064 if (dev->features & NETIF_F_SG)
4065 dev->features |= NETIF_F_GSO;
4067 netdev_initialize_kobject(dev);
4068 ret = netdev_register_kobject(dev);
4069 if (ret)
4070 goto err_uninit;
4071 dev->reg_state = NETREG_REGISTERED;
4074 * Default initial state at registration is that the
4075 * device is present.
4078 set_bit(__LINK_STATE_PRESENT, &dev->state);
4080 dev_init_scheduler(dev);
4081 dev_hold(dev);
4082 list_netdevice(dev);
4084 /* Notify protocols that a new device appeared. */
4085 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4086 ret = notifier_to_errno(ret);
4087 if (ret) {
4088 rollback_registered(dev);
4089 dev->reg_state = NETREG_UNREGISTERED;
4092 out:
4093 return ret;
4095 err_uninit:
4096 if (dev->uninit)
4097 dev->uninit(dev);
4098 goto out;
4102 * register_netdev - register a network device
4103 * @dev: device to register
4105 * Take a completed network device structure and add it to the kernel
4106 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4107 * chain. 0 is returned on success. A negative errno code is returned
4108 * on a failure to set up the device, or if the name is a duplicate.
4110 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4111 * and expands the device name if you passed a format string to
4112 * alloc_netdev.
4114 int register_netdev(struct net_device *dev)
4116 int err;
4118 rtnl_lock();
4121 * If the name is a format string the caller wants us to do a
4122 * name allocation.
4124 if (strchr(dev->name, '%')) {
4125 err = dev_alloc_name(dev, dev->name);
4126 if (err < 0)
4127 goto out;
4130 err = register_netdevice(dev);
4131 out:
4132 rtnl_unlock();
4133 return err;
4135 EXPORT_SYMBOL(register_netdev);
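/*
 * Editor's example (not part of dev.c): a hedged module-init sketch using
 * alloc_netdev() with a "%d" format name so register_netdev() picks a free
 * name.  example_dev, the "example%d" name and the use of ether_setup()
 * are assumptions for illustration only.
 */
#include <linux/module.h>
#include <linux/etherdevice.h>

static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;

	err = register_netdev(example_dev);	/* takes the rtnl lock itself */
	if (err)
		free_netdev(example_dev);
	return err;
}
module_init(example_init);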
4138 * netdev_wait_allrefs - wait until all references are gone.
4140 * This is called when unregistering network devices.
4142 * Any protocol or device that holds a reference should register
4143 * for netdevice notification, and cleanup and put back the
4144 * reference if they receive an UNREGISTER event.
4145 * We can get stuck here if buggy protocols don't correctly
4146 * call dev_put.
4148 static void netdev_wait_allrefs(struct net_device *dev)
4150 unsigned long rebroadcast_time, warning_time;
4152 rebroadcast_time = warning_time = jiffies;
4153 while (atomic_read(&dev->refcnt) != 0) {
4154 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4155 rtnl_lock();
4157 /* Rebroadcast unregister notification */
4158 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4160 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4161 &dev->state)) {
4162 /* We must not have linkwatch events
4163 * pending on unregister. If this
4164 * happens, we simply run the queue
4165 * unscheduled, resulting in a noop
4166 * for this device.
4168 linkwatch_run_queue();
4171 __rtnl_unlock();
4173 rebroadcast_time = jiffies;
4176 msleep(250);
4178 if (time_after(jiffies, warning_time + 10 * HZ)) {
4179 printk(KERN_EMERG "unregister_netdevice: "
4180 "waiting for %s to become free. Usage "
4181 "count = %d\n",
4182 dev->name, atomic_read(&dev->refcnt));
4183 warning_time = jiffies;
4188 /* The sequence is:
4190 * rtnl_lock();
4191 * ...
4192 * register_netdevice(x1);
4193 * register_netdevice(x2);
4194 * ...
4195 * unregister_netdevice(y1);
4196 * unregister_netdevice(y2);
4197 * ...
4198 * rtnl_unlock();
4199 * free_netdev(y1);
4200 * free_netdev(y2);
4202 * We are invoked by rtnl_unlock() after it drops the semaphore.
4203 * This allows us to deal with problems:
4204 * 1) We can delete sysfs objects which invoke hotplug
4205 * without deadlocking with linkwatch via keventd.
4206 * 2) Since we run with the RTNL semaphore not held, we can sleep
4207 * safely in order to wait for the netdev refcnt to drop to zero.
4209 static DEFINE_MUTEX(net_todo_run_mutex);
4210 void netdev_run_todo(void)
4212 struct list_head list;
4214 /* Need to guard against multiple cpus getting out of order. */
4215 mutex_lock(&net_todo_run_mutex);
4217 /* Not safe to do outside the semaphore. We must not return
4218 * until all unregister events invoked by the local processor
4219 * have been completed (either by this todo run, or one on
4220 * another cpu).
4222 if (list_empty(&net_todo_list))
4223 goto out;
4225 /* Snapshot list, allow later requests */
4226 spin_lock(&net_todo_list_lock);
4227 list_replace_init(&net_todo_list, &list);
4228 spin_unlock(&net_todo_list_lock);
4230 while (!list_empty(&list)) {
4231 struct net_device *dev
4232 = list_entry(list.next, struct net_device, todo_list);
4233 list_del(&dev->todo_list);
4235 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4236 printk(KERN_ERR "network todo '%s' but state %d\n",
4237 dev->name, dev->reg_state);
4238 dump_stack();
4239 continue;
4242 dev->reg_state = NETREG_UNREGISTERED;
4244 on_each_cpu(flush_backlog, dev, 1);
4246 netdev_wait_allrefs(dev);
4248 /* paranoia */
4249 BUG_ON(atomic_read(&dev->refcnt));
4250 WARN_ON(dev->ip_ptr);
4251 WARN_ON(dev->ip6_ptr);
4252 WARN_ON(dev->dn_ptr);
4254 if (dev->destructor)
4255 dev->destructor(dev);
4257 /* Free network device */
4258 kobject_put(&dev->dev.kobj);
4261 out:
4262 mutex_unlock(&net_todo_run_mutex);
4265 static struct net_device_stats *internal_stats(struct net_device *dev)
4267 return &dev->stats;
4270 static void netdev_init_one_queue(struct net_device *dev,
4271 struct netdev_queue *queue,
4272 void *_unused)
4274 queue->dev = dev;
4277 static void netdev_init_queues(struct net_device *dev)
4279 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4280 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4281 spin_lock_init(&dev->tx_global_lock);
4285 * alloc_netdev_mq - allocate network device
4286 * @sizeof_priv: size of private data to allocate space for
4287 * @name: device name format string
4288 * @setup: callback to initialize device
4289 * @queue_count: the number of subqueues to allocate
4291 * Allocates a struct net_device with private data area for driver use
4292 * and performs basic initialization. Also allocates subqueue structs
4293 * for each queue on the device at the end of the netdevice.
4295 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4296 void (*setup)(struct net_device *), unsigned int queue_count)
4298 struct netdev_queue *tx;
4299 struct net_device *dev;
4300 size_t alloc_size;
4301 void *p;
4303 BUG_ON(strlen(name) >= sizeof(dev->name));
4305 alloc_size = sizeof(struct net_device);
4306 if (sizeof_priv) {
4307 /* ensure 32-byte alignment of private area */
4308 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4309 alloc_size += sizeof_priv;
4311 /* ensure 32-byte alignment of whole construct */
4312 alloc_size += NETDEV_ALIGN_CONST;
4314 p = kzalloc(alloc_size, GFP_KERNEL);
4315 if (!p) {
4316 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4317 return NULL;
4320 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4321 if (!tx) {
4322 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4323 "tx qdiscs.\n");
4324 kfree(p);
4325 return NULL;
4328 dev = (struct net_device *)
4329 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4330 dev->padded = (char *)dev - (char *)p;
4331 dev_net_set(dev, &init_net);
4333 dev->_tx = tx;
4334 dev->num_tx_queues = queue_count;
4335 dev->real_num_tx_queues = queue_count;
4337 if (sizeof_priv) {
4338 dev->priv = ((char *)dev +
4339 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4340 & ~NETDEV_ALIGN_CONST));
4343 dev->gso_max_size = GSO_MAX_SIZE;
4345 netdev_init_queues(dev);
4347 dev->get_stats = internal_stats;
4348 netpoll_netdev_init(dev);
4349 setup(dev);
4350 strcpy(dev->name, name);
4351 return dev;
4353 EXPORT_SYMBOL(alloc_netdev_mq);
4356 * free_netdev - free network device
4357 * @dev: device
4359 * This function does the last stage of destroying an allocated device
4360 * interface. The reference to the device object is released.
4361 * If this is the last reference then it will be freed.
4363 void free_netdev(struct net_device *dev)
4365 release_net(dev_net(dev));
4367 kfree(dev->_tx);
4369 /* Compatibility with error handling in drivers */
4370 if (dev->reg_state == NETREG_UNINITIALIZED) {
4371 kfree((char *)dev - dev->padded);
4372 return;
4375 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4376 dev->reg_state = NETREG_RELEASED;
4378 /* will free via device release */
4379 put_device(&dev->dev);
4383 * synchronize_net - Synchronize with packet receive processing
4385 * Wait for packets currently being received to be done.
4386 * Does not block later packets from starting.
4388 void synchronize_net(void)
4390 might_sleep();
4391 synchronize_rcu();
4395 * unregister_netdevice - remove device from the kernel
4396 * @dev: device
4398 * This function shuts down a device interface and removes it
4399 * from the kernel tables.
4401 * Callers must hold the rtnl semaphore. You may want
4402 * unregister_netdev() instead of this.
4405 void unregister_netdevice(struct net_device *dev)
4407 ASSERT_RTNL();
4409 rollback_registered(dev);
4410 /* Finish processing unregister after unlock */
4411 net_set_todo(dev);
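/*
 * Caller-side sketch (illustrative only; the device pointers are
 * hypothetical): unlike unregister_netdev(), this variant lets a caller
 * that already holds the RTNL semaphore tear down several devices under
 * a single lock/unlock pair.
 *
 *	rtnl_lock();
 *	unregister_netdevice(dev_a);
 *	unregister_netdevice(dev_b);
 *	rtnl_unlock();
 */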
4415 * unregister_netdev - remove device from the kernel
4416 * @dev: device
4418 * This function shuts down a device interface and removes it
4419 * from the kernel tables.
4421 * This is just a wrapper for unregister_netdevice that takes
4422 * the rtnl semaphore. In general you want to use this and not
4423 * unregister_netdevice.
4425 void unregister_netdev(struct net_device *dev)
4427 rtnl_lock();
4428 unregister_netdevice(dev);
4429 rtnl_unlock();
4432 EXPORT_SYMBOL(unregister_netdev);
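/*
 * Illustrative teardown sketch (driver and variable names are
 * hypothetical): a module exit routine unregisters the device first and
 * only then releases it with free_netdev().
 *
 *	static void __exit my_cleanup(void)
 *	{
 *		unregister_netdev(my_dev);
 *		free_netdev(my_dev);
 *	}
 */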
4435 * dev_change_net_namespace - move device to a different network namespace
4436 * @dev: device
4437 * @net: network namespace
4438 * @pat: If not NULL name pattern to try if the current device name
4439 * is already taken in the destination network namespace.
4441 * This function shuts down a device interface and moves it
4442 * to a new network namespace. On success 0 is returned; on
4443 * failure a negative errno code is returned.
4445 * Callers must hold the rtnl semaphore.
4448 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4450 char buf[IFNAMSIZ];
4451 const char *destname;
4452 int err;
4454 ASSERT_RTNL();
4456 /* Don't allow namespace local devices to be moved. */
4457 err = -EINVAL;
4458 if (dev->features & NETIF_F_NETNS_LOCAL)
4459 goto out;
4461 /* Ensure the device has been registered */
4462 err = -EINVAL;
4463 if (dev->reg_state != NETREG_REGISTERED)
4464 goto out;
4466 /* Get out if there is nothing to do */
4467 err = 0;
4468 if (net_eq(dev_net(dev), net))
4469 goto out;
4471 /* Pick the destination device name, and ensure
4472 * we can use it in the destination network namespace.
4474 err = -EEXIST;
4475 destname = dev->name;
4476 if (__dev_get_by_name(net, destname)) {
4477 /* We get here if we can't use the current device name */
4478 if (!pat)
4479 goto out;
4480 if (!dev_valid_name(pat))
4481 goto out;
4482 if (strchr(pat, '%')) {
4483 if (__dev_alloc_name(net, pat, buf) < 0)
4484 goto out;
4485 destname = buf;
4486 } else
4487 destname = pat;
4488 if (__dev_get_by_name(net, destname))
4489 goto out;
4493 * And now a mini version of register_netdevice and unregister_netdevice.
4496 /* If device is running close it first. */
4497 dev_close(dev);
4499 /* And unlink it from device chain */
4500 err = -ENODEV;
4501 unlist_netdevice(dev);
4503 synchronize_net();
4505 /* Shutdown queueing discipline. */
4506 dev_shutdown(dev);
4508 /* Notify protocols that we are about to destroy
4509 this device. They should clean up all of their state.
4511 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4514 * Flush the unicast and multicast chains
4516 dev_addr_discard(dev);
4518 /* Actually switch the network namespace */
4519 dev_net_set(dev, net);
4521 /* Assign the new device name */
4522 if (destname != dev->name)
4523 strcpy(dev->name, destname);
4525 /* If there is an ifindex conflict assign a new one */
4526 if (__dev_get_by_index(net, dev->ifindex)) {
4527 int iflink = (dev->iflink == dev->ifindex);
4528 dev->ifindex = dev_new_index(net);
4529 if (iflink)
4530 dev->iflink = dev->ifindex;
4533 /* Fixup kobjects */
4534 netdev_unregister_kobject(dev);
4535 err = netdev_register_kobject(dev);
4536 WARN_ON(err);
4538 /* Add the device back in the hashes */
4539 list_netdevice(dev);
4541 /* Notify protocols, that a new device appeared. */
4542 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4544 synchronize_net();
4545 err = 0;
4546 out:
4547 return err;
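/*
 * Caller-side sketch (illustrative only; "newnet" is hypothetical): move a
 * device under RTNL, letting the '%d' pattern pick a free name if the
 * current one is already taken in the destination namespace.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, newnet, "eth%d");
 *	rtnl_unlock();
 */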
4550 static int dev_cpu_callback(struct notifier_block *nfb,
4551 unsigned long action,
4552 void *ocpu)
4554 struct sk_buff **list_skb;
4555 struct Qdisc **list_net;
4556 struct sk_buff *skb;
4557 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4558 struct softnet_data *sd, *oldsd;
4560 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4561 return NOTIFY_OK;
4563 local_irq_disable();
4564 cpu = smp_processor_id();
4565 sd = &per_cpu(softnet_data, cpu);
4566 oldsd = &per_cpu(softnet_data, oldcpu);
4568 /* Find end of our completion_queue. */
4569 list_skb = &sd->completion_queue;
4570 while (*list_skb)
4571 list_skb = &(*list_skb)->next;
4572 /* Append completion queue from offline CPU. */
4573 *list_skb = oldsd->completion_queue;
4574 oldsd->completion_queue = NULL;
4576 /* Find end of our output_queue. */
4577 list_net = &sd->output_queue;
4578 while (*list_net)
4579 list_net = &(*list_net)->next_sched;
4580 /* Append output queue from offline CPU. */
4581 *list_net = oldsd->output_queue;
4582 oldsd->output_queue = NULL;
4584 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4585 local_irq_enable();
4587 /* Process offline CPU's input_pkt_queue */
4588 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4589 netif_rx(skb);
4591 return NOTIFY_OK;
4594 #ifdef CONFIG_NET_DMA
4596 * net_dma_rebalance - try to maintain one DMA channel per CPU
4597 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4599 * This is called when the number of channels allocated to the net_dma client
4600 * changes. The net_dma client tries to have one DMA channel per CPU.
4603 static void net_dma_rebalance(struct net_dma *net_dma)
4605 unsigned int cpu, i, n, chan_idx;
4606 struct dma_chan *chan;
4608 if (cpus_empty(net_dma->channel_mask)) {
4609 for_each_online_cpu(cpu)
4610 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4611 return;
4614 i = 0;
4615 cpu = first_cpu(cpu_online_map);
4617 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4618 chan = net_dma->channels[chan_idx];
4620 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4621 + (i < (num_online_cpus() %
4622 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4624 while (n) {
4625 per_cpu(softnet_data, cpu).net_dma = chan;
4626 cpu = next_cpu(cpu, cpu_online_map);
4627 n--;
4629 i++;
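/*
 * Worked example of the distribution above (assuming 8 online CPUs and
 * 3 channels in channel_mask): 8 / 3 = 2 with remainder 2, so channels
 * 0 and 1 each serve 3 CPUs and channel 2 serves the remaining 2,
 * covering every online CPU with at most one extra CPU per channel.
 */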
4634 * netdev_dma_event - event callback for the net_dma_client
4635 * @client: should always be net_dma_client
4636 * @chan: DMA channel for the event
4637 * @state: DMA state to be handled
4639 static enum dma_state_client
4640 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4641 enum dma_state state)
4643 int i, found = 0, pos = -1;
4644 struct net_dma *net_dma =
4645 container_of(client, struct net_dma, client);
4646 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4648 spin_lock(&net_dma->lock);
4649 switch (state) {
4650 case DMA_RESOURCE_AVAILABLE:
4651 for (i = 0; i < nr_cpu_ids; i++)
4652 if (net_dma->channels[i] == chan) {
4653 found = 1;
4654 break;
4655 } else if (net_dma->channels[i] == NULL && pos < 0)
4656 pos = i;
4658 if (!found && pos >= 0) {
4659 ack = DMA_ACK;
4660 net_dma->channels[pos] = chan;
4661 cpu_set(pos, net_dma->channel_mask);
4662 net_dma_rebalance(net_dma);
4664 break;
4665 case DMA_RESOURCE_REMOVED:
4666 for (i = 0; i < nr_cpu_ids; i++)
4667 if (net_dma->channels[i] == chan) {
4668 found = 1;
4669 pos = i;
4670 break;
4673 if (found) {
4674 ack = DMA_ACK;
4675 cpu_clear(pos, net_dma->channel_mask);
4676 net_dma->channels[i] = NULL;
4677 net_dma_rebalance(net_dma);
4679 break;
4680 default:
4681 break;
4683 spin_unlock(&net_dma->lock);
4685 return ack;
4689 * netdev_dma_register - register the networking subsystem as a DMA client
4691 static int __init netdev_dma_register(void)
4693 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4694 GFP_KERNEL);
4695 if (unlikely(!net_dma.channels)) {
4696 printk(KERN_NOTICE
4697 "netdev_dma: no memory for net_dma.channels\n");
4698 return -ENOMEM;
4700 spin_lock_init(&net_dma.lock);
4701 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4702 dma_async_client_register(&net_dma.client);
4703 dma_async_client_chan_request(&net_dma.client);
4704 return 0;
4707 #else
4708 static int __init netdev_dma_register(void) { return -ENODEV; }
4709 #endif /* CONFIG_NET_DMA */
4712 * netdev_compute_features - compute conjunction of two feature sets
4713 * @all: first feature set
4714 * @one: second feature set
4716 * Computes a new feature set after adding a device with feature set
4717 * @one to the master device with current feature set @all. Returns
4718 * the new feature set.
4720 int netdev_compute_features(unsigned long all, unsigned long one)
4722 /* if device needs checksumming, downgrade to hw checksumming */
4723 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4724 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4726 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4727 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4728 all ^= NETIF_F_HW_CSUM
4729 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4731 if (one & NETIF_F_GSO)
4732 one |= NETIF_F_GSO_SOFTWARE;
4733 one |= NETIF_F_GSO;
4736 * If even one device supports a GSO protocol with software fallback,
4737 * enable it for all.
4739 all |= one & NETIF_F_GSO_SOFTWARE;
4741 /* If even one device supports robust GSO, enable it for all. */
4742 if (one & NETIF_F_GSO_ROBUST)
4743 all |= NETIF_F_GSO_ROBUST;
4745 all &= one | NETIF_F_LLTX;
4747 if (!(all & NETIF_F_ALL_CSUM))
4748 all &= ~NETIF_F_SG;
4749 if (!(all & NETIF_F_SG))
4750 all &= ~NETIF_F_GSO_MASK;
4752 return all;
4754 EXPORT_SYMBOL(netdev_compute_features);
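/*
 * Worked example (illustrative; assumes NETIF_F_TSO is included in
 * NETIF_F_GSO_SOFTWARE): a master whose current set is NETIF_F_NO_CSUM |
 * NETIF_F_SG | NETIF_F_TSO gains a slave offering NETIF_F_IP_CSUM |
 * NETIF_F_SG.  NO_CSUM is downgraded to HW_CSUM and then to IP_CSUM |
 * IPV6_CSUM, the intersection with the slave's set keeps IP_CSUM and SG,
 * and TSO is dropped because the slave has no software-fallback GSO.
 * The result is NETIF_F_IP_CSUM | NETIF_F_SG.
 */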
4756 static struct hlist_head *netdev_create_hash(void)
4758 int i;
4759 struct hlist_head *hash;
4761 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4762 if (hash != NULL)
4763 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4764 INIT_HLIST_HEAD(&hash[i]);
4766 return hash;
4769 /* Initialize per network namespace state */
4770 static int __net_init netdev_init(struct net *net)
4772 INIT_LIST_HEAD(&net->dev_base_head);
4774 net->dev_name_head = netdev_create_hash();
4775 if (net->dev_name_head == NULL)
4776 goto err_name;
4778 net->dev_index_head = netdev_create_hash();
4779 if (net->dev_index_head == NULL)
4780 goto err_idx;
4782 return 0;
4784 err_idx:
4785 kfree(net->dev_name_head);
4786 err_name:
4787 return -ENOMEM;
4791 * netdev_drivername - network driver for the device
4792 * @dev: network device
4793 * @buffer: buffer for resulting name
4794 * @len: size of buffer
4796 * Determine network driver for device.
4798 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
4800 const struct device_driver *driver;
4801 const struct device *parent;
4803 if (len <= 0 || !buffer)
4804 return buffer;
4805 buffer[0] = 0;
4807 parent = dev->dev.parent;
4809 if (!parent)
4810 return buffer;
4812 driver = parent->driver;
4813 if (driver && driver->name)
4814 strlcpy(buffer, driver->name, len);
4815 return buffer;
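/*
 * Usage sketch (illustrative only; the buffer name is hypothetical), in
 * the style of the tx-timeout watchdog message:
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */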
4818 static void __net_exit netdev_exit(struct net *net)
4820 kfree(net->dev_name_head);
4821 kfree(net->dev_index_head);
4824 static struct pernet_operations __net_initdata netdev_net_ops = {
4825 .init = netdev_init,
4826 .exit = netdev_exit,
4829 static void __net_exit default_device_exit(struct net *net)
4831 struct net_device *dev, *next;
4833 * Push all migratable network devices back to the
4834 * initial network namespace
4836 rtnl_lock();
4837 for_each_netdev_safe(net, dev, next) {
4838 int err;
4839 char fb_name[IFNAMSIZ];
4841 /* Ignore unmovable devices (e.g. loopback) */
4842 if (dev->features & NETIF_F_NETNS_LOCAL)
4843 continue;
4845 /* Push remaining network devices to init_net */
4846 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4847 err = dev_change_net_namespace(dev, &init_net, fb_name);
4848 if (err) {
4849 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4850 __func__, dev->name, err);
4851 BUG();
4854 rtnl_unlock();
4857 static struct pernet_operations __net_initdata default_device_ops = {
4858 .exit = default_device_exit,
4862 * Initialize the DEV module. At boot time this walks the device list and
4863 * unhooks any devices that fail to initialise (normally hardware not
4864 * present) and leaves us with a valid list of present and active devices.
4869 * This is called single threaded during boot, so no need
4870 * to take the rtnl semaphore.
4872 static int __init net_dev_init(void)
4874 int i, rc = -ENOMEM;
4876 BUG_ON(!dev_boot_phase);
4878 if (dev_proc_init())
4879 goto out;
4881 if (netdev_kobject_init())
4882 goto out;
4884 INIT_LIST_HEAD(&ptype_all);
4885 for (i = 0; i < PTYPE_HASH_SIZE; i++)
4886 INIT_LIST_HEAD(&ptype_base[i]);
4888 if (register_pernet_subsys(&netdev_net_ops))
4889 goto out;
4891 if (register_pernet_device(&default_device_ops))
4892 goto out;
4895 * Initialise the packet receive queues.
4898 for_each_possible_cpu(i) {
4899 struct softnet_data *queue;
4901 queue = &per_cpu(softnet_data, i);
4902 skb_queue_head_init(&queue->input_pkt_queue);
4903 queue->completion_queue = NULL;
4904 INIT_LIST_HEAD(&queue->poll_list);
4906 queue->backlog.poll = process_backlog;
4907 queue->backlog.weight = weight_p;
4910 netdev_dma_register();
4912 dev_boot_phase = 0;
4914 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4915 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
4917 hotcpu_notifier(dev_cpu_callback, 0);
4918 dst_init();
4919 dev_mcast_init();
4920 rc = 0;
4921 out:
4922 return rc;
4925 subsys_initcall(net_dev_init);
4927 EXPORT_SYMBOL(__dev_get_by_index);
4928 EXPORT_SYMBOL(__dev_get_by_name);
4929 EXPORT_SYMBOL(__dev_remove_pack);
4930 EXPORT_SYMBOL(dev_valid_name);
4931 EXPORT_SYMBOL(dev_add_pack);
4932 EXPORT_SYMBOL(dev_alloc_name);
4933 EXPORT_SYMBOL(dev_close);
4934 EXPORT_SYMBOL(dev_get_by_flags);
4935 EXPORT_SYMBOL(dev_get_by_index);
4936 EXPORT_SYMBOL(dev_get_by_name);
4937 EXPORT_SYMBOL(dev_open);
4938 EXPORT_SYMBOL(dev_queue_xmit);
4939 EXPORT_SYMBOL(dev_remove_pack);
4940 EXPORT_SYMBOL(dev_set_allmulti);
4941 EXPORT_SYMBOL(dev_set_promiscuity);
4942 EXPORT_SYMBOL(dev_change_flags);
4943 EXPORT_SYMBOL(dev_set_mtu);
4944 EXPORT_SYMBOL(dev_set_mac_address);
4945 EXPORT_SYMBOL(free_netdev);
4946 EXPORT_SYMBOL(netdev_boot_setup_check);
4947 EXPORT_SYMBOL(netdev_set_master);
4948 EXPORT_SYMBOL(netdev_state_change);
4949 EXPORT_SYMBOL(netif_receive_skb);
4950 EXPORT_SYMBOL(netif_rx);
4951 EXPORT_SYMBOL(register_gifconf);
4952 EXPORT_SYMBOL(register_netdevice);
4953 EXPORT_SYMBOL(register_netdevice_notifier);
4954 EXPORT_SYMBOL(skb_checksum_help);
4955 EXPORT_SYMBOL(synchronize_net);
4956 EXPORT_SYMBOL(unregister_netdevice);
4957 EXPORT_SYMBOL(unregister_netdevice_notifier);
4958 EXPORT_SYMBOL(net_enable_timestamp);
4959 EXPORT_SYMBOL(net_disable_timestamp);
4960 EXPORT_SYMBOL(dev_get_flags);
4962 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4963 EXPORT_SYMBOL(br_handle_frame_hook);
4964 EXPORT_SYMBOL(br_fdb_get_hook);
4965 EXPORT_SYMBOL(br_fdb_put_hook);
4966 #endif
4968 #ifdef CONFIG_KMOD
4969 EXPORT_SYMBOL(dev_load);
4970 #endif
4972 EXPORT_PER_CPU_SYMBOL(softnet_data);