net: Introduce for_each_netdev_rcu() iterator
net/core/dev.c
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/mutex.h>
84 #include <linux/string.h>
85 #include <linux/mm.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/ethtool.h>
94 #include <linux/notifier.h>
95 #include <linux/skbuff.h>
96 #include <net/net_namespace.h>
97 #include <net/sock.h>
98 #include <linux/rtnetlink.h>
99 #include <linux/proc_fs.h>
100 #include <linux/seq_file.h>
101 #include <linux/stat.h>
102 #include <linux/if_bridge.h>
103 #include <linux/if_macvlan.h>
104 #include <net/dst.h>
105 #include <net/pkt_sched.h>
106 #include <net/checksum.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
131 #include "net-sysfs.h"
133 /* Instead of increasing this, you should create a hash table. */
134 #define MAX_GRO_SKBS 8
136 /* This should be increased if a protocol with a bigger head is added. */
137 #define GRO_MAX_HEAD (MAX_HEADER + 128)
140 * The list of packet types we will receive (as opposed to discard)
141 * and the routines to invoke.
143 * Why 16? Because with 16 the only overlap we get on a hash of the
144 * low nibble of the protocol value is RARP/SNAP/X.25.
146 * NOTE: That is no longer true with the addition of VLAN tags. Not
147 * sure which should go first, but I bet it won't make much
148 * difference if we are running VLANs. The good news is that
149 * this protocol won't be in the list unless compiled in, so
150 * the average user (w/out VLANs) will not be adversely affected.
151 * --BLG
153 * 0800 IP
154 * 8100 802.1Q VLAN
155 * 0001 802.3
156 * 0002 AX.25
157 * 0004 802.2
158 * 8035 RARP
159 * 0005 SNAP
160 * 0805 X.25
161 * 0806 ARP
162 * 8137 IPX
163 * 0009 Localtalk
164 * 86DD IPv6
167 #define PTYPE_HASH_SIZE (16)
168 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
170 static DEFINE_SPINLOCK(ptype_lock);
171 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
172 static struct list_head ptype_all __read_mostly; /* Taps */
175 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
176 * semaphore.
178 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
180 * Writers must hold the rtnl semaphore while they loop through the
181 * dev_base_head list, and hold dev_base_lock for writing when they do the
182 * actual updates. This allows pure readers to access the list even
183 * while a writer is preparing to update it.
185 * To put it another way, dev_base_lock is held for writing only to
186 * protect against pure readers; the rtnl semaphore provides the
187 * protection against other writers.
189 * See, for example usages, register_netdevice() and
190 * unregister_netdevice(), which must be called with the rtnl
191 * semaphore held.
193 DEFINE_RWLOCK(dev_base_lock);
194 EXPORT_SYMBOL(dev_base_lock);
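/*
 * Illustrative sketch (editorial, not part of the original file): a pure
 * reader can now walk the device list under rcu_read_lock() with the
 * for_each_netdev_rcu() iterator instead of taking dev_base_lock;
 * do_something_readonly() is a hypothetical helper.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		do_something_readonly(dev);
 *	rcu_read_unlock();
 *
 * Writers still take the rtnl semaphore (and dev_base_lock for the
 * actual update) as described above.
 */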
196 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
198 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
199 return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
202 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
204 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
207 /* Device list insertion */
208 static int list_netdevice(struct net_device *dev)
210 struct net *net = dev_net(dev);
212 ASSERT_RTNL();
214 write_lock_bh(&dev_base_lock);
215 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
216 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
217 hlist_add_head_rcu(&dev->index_hlist,
218 dev_index_hash(net, dev->ifindex));
219 write_unlock_bh(&dev_base_lock);
220 return 0;
223 /* Device list removal
224 * caller must respect a RCU grace period before freeing/reusing dev
226 static void unlist_netdevice(struct net_device *dev)
228 ASSERT_RTNL();
230 /* Unlink dev from the device chain */
231 write_lock_bh(&dev_base_lock);
232 list_del_rcu(&dev->dev_list);
233 hlist_del_rcu(&dev->name_hlist);
234 hlist_del_rcu(&dev->index_hlist);
235 write_unlock_bh(&dev_base_lock);
239 * Our notifier list
242 static RAW_NOTIFIER_HEAD(netdev_chain);
245 * Device drivers call our routines to queue packets here. We empty the
246 * queue in the local softnet handler.
249 DEFINE_PER_CPU(struct softnet_data, softnet_data);
250 EXPORT_PER_CPU_SYMBOL(softnet_data);
252 #ifdef CONFIG_LOCKDEP
254 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
255 * according to dev->type
257 static const unsigned short netdev_lock_type[] =
258 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
259 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
260 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
261 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
262 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
263 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
264 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
265 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
266 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
267 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
268 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
273 ARPHRD_VOID, ARPHRD_NONE};
275 static const char *const netdev_lock_name[] =
276 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
277 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
278 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
279 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
280 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
281 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
282 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
283 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
284 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
285 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
286 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
291 "_xmit_VOID", "_xmit_NONE"};
293 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
294 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
296 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
298 int i;
300 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
301 if (netdev_lock_type[i] == dev_type)
302 return i;
303 /* the last key is used by default */
304 return ARRAY_SIZE(netdev_lock_type) - 1;
307 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
308 unsigned short dev_type)
310 int i;
312 i = netdev_lock_pos(dev_type);
313 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
314 netdev_lock_name[i]);
317 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
319 int i;
321 i = netdev_lock_pos(dev->type);
322 lockdep_set_class_and_name(&dev->addr_list_lock,
323 &netdev_addr_lock_key[i],
324 netdev_lock_name[i]);
326 #else
327 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
328 unsigned short dev_type)
331 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
334 #endif
336 /*******************************************************************************
338 Protocol management and registration routines
340 *******************************************************************************/
343 * Add a protocol ID to the list. Now that the input handler is
344 * smarter we can dispense with all the messy stuff that used to be
345 * here.
347 * BEWARE!!! Protocol handlers, mangling input packets,
348 * MUST BE last in hash buckets and checking protocol handlers
349 * MUST start from promiscuous ptype_all chain in net_bh.
350 * It is true now, do not change it.
351 * Explanation follows: if a protocol handler that mangles packets
352 * is first on the list, it cannot sense that the packet
353 * is cloned and should be copied-on-write, so it will
354 * change it and subsequent readers will get a broken packet.
355 * --ANK (980803)
359 * dev_add_pack - add packet handler
360 * @pt: packet type declaration
362 * Add a protocol handler to the networking stack. The passed &packet_type
363 * is linked into kernel lists and may not be freed until it has been
364 * removed from the kernel lists.
366 * This call does not sleep, therefore it cannot
367 * guarantee that all CPUs in the middle of receiving packets
368 * will see the new packet type (until the next received packet).
371 void dev_add_pack(struct packet_type *pt)
373 int hash;
375 spin_lock_bh(&ptype_lock);
376 if (pt->type == htons(ETH_P_ALL))
377 list_add_rcu(&pt->list, &ptype_all);
378 else {
379 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
380 list_add_rcu(&pt->list, &ptype_base[hash]);
382 spin_unlock_bh(&ptype_lock);
384 EXPORT_SYMBOL(dev_add_pack);
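/*
 * Illustrative sketch (editorial, not part of the original file): a
 * protocol module typically registers a static struct packet_type at
 * init time; my_proto_rcv() and MY_ETH_P_EXAMPLE are hypothetical names.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev);
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(MY_ETH_P_EXAMPLE),
 *		.func	= my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *
 * Leaving .dev NULL means the handler sees packets from every device.
 */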
387 * __dev_remove_pack - remove packet handler
388 * @pt: packet type declaration
390 * Remove a protocol handler that was previously added to the kernel
391 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
392 * from the kernel lists and can be freed or reused once this function
393 * returns.
395 * The packet type might still be in use by receivers
396 * and must not be freed until after all the CPUs have gone
397 * through a quiescent state.
399 void __dev_remove_pack(struct packet_type *pt)
401 struct list_head *head;
402 struct packet_type *pt1;
404 spin_lock_bh(&ptype_lock);
406 if (pt->type == htons(ETH_P_ALL))
407 head = &ptype_all;
408 else
409 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
411 list_for_each_entry(pt1, head, list) {
412 if (pt == pt1) {
413 list_del_rcu(&pt->list);
414 goto out;
418 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
419 out:
420 spin_unlock_bh(&ptype_lock);
422 EXPORT_SYMBOL(__dev_remove_pack);
425 * dev_remove_pack - remove packet handler
426 * @pt: packet type declaration
428 * Remove a protocol handler that was previously added to the kernel
429 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
430 * from the kernel lists and can be freed or reused once this function
431 * returns.
433 * This call sleeps to guarantee that no CPU is looking at the packet
434 * type after return.
436 void dev_remove_pack(struct packet_type *pt)
438 __dev_remove_pack(pt);
440 synchronize_net();
442 EXPORT_SYMBOL(dev_remove_pack);
444 /******************************************************************************
446 Device Boot-time Settings Routines
448 *******************************************************************************/
450 /* Boot time configuration table */
451 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
454 * netdev_boot_setup_add - add new setup entry
455 * @name: name of the device
456 * @map: configured settings for the device
458 * Adds new setup entry to the dev_boot_setup list. The function
459 * returns 0 on error and 1 on success. This is a generic routine for
460 * all netdevices.
462 static int netdev_boot_setup_add(char *name, struct ifmap *map)
464 struct netdev_boot_setup *s;
465 int i;
467 s = dev_boot_setup;
468 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
469 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
470 memset(s[i].name, 0, sizeof(s[i].name));
471 strlcpy(s[i].name, name, IFNAMSIZ);
472 memcpy(&s[i].map, map, sizeof(s[i].map));
473 break;
477 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
481 * netdev_boot_setup_check - check boot time settings
482 * @dev: the netdevice
484 * Check boot time settings for the device.
485 * Any settings found are copied to the device for use
486 * later during device probing.
487 * Returns 0 if no settings are found, 1 if they are.
489 int netdev_boot_setup_check(struct net_device *dev)
491 struct netdev_boot_setup *s = dev_boot_setup;
492 int i;
494 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
495 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
496 !strcmp(dev->name, s[i].name)) {
497 dev->irq = s[i].map.irq;
498 dev->base_addr = s[i].map.base_addr;
499 dev->mem_start = s[i].map.mem_start;
500 dev->mem_end = s[i].map.mem_end;
501 return 1;
504 return 0;
506 EXPORT_SYMBOL(netdev_boot_setup_check);
510 * netdev_boot_base - get address from boot time settings
511 * @prefix: prefix for network device
512 * @unit: id for network device
514 * Check boot time settings for the base address of the device.
515 * Any settings found are used for the device
516 * later during device probing.
517 * Returns 0 if no settings are found.
519 unsigned long netdev_boot_base(const char *prefix, int unit)
521 const struct netdev_boot_setup *s = dev_boot_setup;
522 char name[IFNAMSIZ];
523 int i;
525 sprintf(name, "%s%d", prefix, unit);
528 * If device already registered then return base of 1
529 * to indicate not to probe for this interface
531 if (__dev_get_by_name(&init_net, name))
532 return 1;
534 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
535 if (!strcmp(name, s[i].name))
536 return s[i].map.base_addr;
537 return 0;
541 * Saves at boot time configured settings for any netdevice.
543 int __init netdev_boot_setup(char *str)
545 int ints[5];
546 struct ifmap map;
548 str = get_options(str, ARRAY_SIZE(ints), ints);
549 if (!str || !*str)
550 return 0;
552 /* Save settings */
553 memset(&map, 0, sizeof(map));
554 if (ints[0] > 0)
555 map.irq = ints[1];
556 if (ints[0] > 1)
557 map.base_addr = ints[2];
558 if (ints[0] > 2)
559 map.mem_start = ints[3];
560 if (ints[0] > 3)
561 map.mem_end = ints[4];
563 /* Add new entry to the list */
564 return netdev_boot_setup_add(str, &map);
567 __setup("netdev=", netdev_boot_setup);
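/*
 * Example (editorial): given the parsing above, a command line such as
 *
 *	netdev=9,0x300,eth0
 *
 * records irq 9 and base_addr 0x300 for the device named "eth0"; up to
 * four integers (irq, base_addr, mem_start, mem_end) may precede the name.
 */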
569 /*******************************************************************************
571 Device Interface Subroutines
573 *******************************************************************************/
576 * __dev_get_by_name - find a device by its name
577 * @net: the applicable net namespace
578 * @name: name to find
580 * Find an interface by name. Must be called under RTNL semaphore
581 * or @dev_base_lock. If the name is found a pointer to the device
582 * is returned. If the name is not found then %NULL is returned. The
583 * reference counters are not incremented so the caller must be
584 * careful with locks.
587 struct net_device *__dev_get_by_name(struct net *net, const char *name)
589 struct hlist_node *p;
590 struct net_device *dev;
591 struct hlist_head *head = dev_name_hash(net, name);
593 hlist_for_each_entry(dev, p, head, name_hlist)
594 if (!strncmp(dev->name, name, IFNAMSIZ))
595 return dev;
597 return NULL;
599 EXPORT_SYMBOL(__dev_get_by_name);
602 * dev_get_by_name_rcu - find a device by its name
603 * @net: the applicable net namespace
604 * @name: name to find
606 * Find an interface by name.
607 * If the name is found a pointer to the device is returned.
608 * If the name is not found then %NULL is returned.
609 * The reference counters are not incremented so the caller must be
610 * careful with locks. The caller must hold RCU lock.
613 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
615 struct hlist_node *p;
616 struct net_device *dev;
617 struct hlist_head *head = dev_name_hash(net, name);
619 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
620 if (!strncmp(dev->name, name, IFNAMSIZ))
621 return dev;
623 return NULL;
625 EXPORT_SYMBOL(dev_get_by_name_rcu);
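/*
 * Illustrative sketch (editorial, not part of the original file): callers
 * that only peek at the device can use the RCU variant and avoid taking a
 * reference; "eth0" is just an example name.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;
 *	rcu_read_unlock();
 *
 * The pointer must not be used after rcu_read_unlock().
 */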
628 * dev_get_by_name - find a device by its name
629 * @net: the applicable net namespace
630 * @name: name to find
632 * Find an interface by name. This can be called from any
633 * context and does its own locking. The returned handle has
634 * the usage count incremented and the caller must use dev_put() to
635 * release it when it is no longer needed. %NULL is returned if no
636 * matching device is found.
639 struct net_device *dev_get_by_name(struct net *net, const char *name)
641 struct net_device *dev;
643 rcu_read_lock();
644 dev = dev_get_by_name_rcu(net, name);
645 if (dev)
646 dev_hold(dev);
647 rcu_read_unlock();
648 return dev;
650 EXPORT_SYMBOL(dev_get_by_name);
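/*
 * Illustrative sketch (editorial): when the device must stay valid after
 * the lookup, use the reference-counted variant and drop the reference
 * with dev_put() when done.
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (!dev)
 *		return -ENODEV;
 *	...
 *	dev_put(dev);
 */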
653 * __dev_get_by_index - find a device by its ifindex
654 * @net: the applicable net namespace
655 * @ifindex: index of device
657 * Search for an interface by index. Returns %NULL if the device
658 * is not found or a pointer to the device. The device has not
659 * had its reference counter increased so the caller must be careful
660 * about locking. The caller must hold either the RTNL semaphore
661 * or @dev_base_lock.
664 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
666 struct hlist_node *p;
667 struct net_device *dev;
668 struct hlist_head *head = dev_index_hash(net, ifindex);
670 hlist_for_each_entry(dev, p, head, index_hlist)
671 if (dev->ifindex == ifindex)
672 return dev;
674 return NULL;
676 EXPORT_SYMBOL(__dev_get_by_index);
679 * dev_get_by_index_rcu - find a device by its ifindex
680 * @net: the applicable net namespace
681 * @ifindex: index of device
683 * Search for an interface by index. Returns %NULL if the device
684 * is not found or a pointer to the device. The device has not
685 * had its reference counter increased so the caller must be careful
686 * about locking. The caller must hold RCU lock.
689 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
691 struct hlist_node *p;
692 struct net_device *dev;
693 struct hlist_head *head = dev_index_hash(net, ifindex);
695 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
696 if (dev->ifindex == ifindex)
697 return dev;
699 return NULL;
701 EXPORT_SYMBOL(dev_get_by_index_rcu);
705 * dev_get_by_index - find a device by its ifindex
706 * @net: the applicable net namespace
707 * @ifindex: index of device
709 * Search for an interface by index. Returns NULL if the device
710 * is not found or a pointer to the device. The device returned has
711 * had a reference added and the pointer is safe until the user calls
712 * dev_put to indicate they have finished with it.
715 struct net_device *dev_get_by_index(struct net *net, int ifindex)
717 struct net_device *dev;
719 rcu_read_lock();
720 dev = dev_get_by_index_rcu(net, ifindex);
721 if (dev)
722 dev_hold(dev);
723 rcu_read_unlock();
724 return dev;
726 EXPORT_SYMBOL(dev_get_by_index);
729 * dev_getbyhwaddr - find a device by its hardware address
730 * @net: the applicable net namespace
731 * @type: media type of device
732 * @ha: hardware address
734 * Search for an interface by MAC address. Returns NULL if the device
735 * is not found or a pointer to the device. The caller must hold the
736 * rtnl semaphore. The returned device has not had its ref count increased
737 * and the caller must therefore be careful about locking
739 * BUGS:
740 * If the API was consistent this would be __dev_get_by_hwaddr
743 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
745 struct net_device *dev;
747 ASSERT_RTNL();
749 for_each_netdev(net, dev)
750 if (dev->type == type &&
751 !memcmp(dev->dev_addr, ha, dev->addr_len))
752 return dev;
754 return NULL;
756 EXPORT_SYMBOL(dev_getbyhwaddr);
758 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
760 struct net_device *dev;
762 ASSERT_RTNL();
763 for_each_netdev(net, dev)
764 if (dev->type == type)
765 return dev;
767 return NULL;
769 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
771 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
773 struct net_device *dev;
775 rtnl_lock();
776 dev = __dev_getfirstbyhwtype(net, type);
777 if (dev)
778 dev_hold(dev);
779 rtnl_unlock();
780 return dev;
782 EXPORT_SYMBOL(dev_getfirstbyhwtype);
785 * dev_get_by_flags - find any device with given flags
786 * @net: the applicable net namespace
787 * @if_flags: IFF_* values
788 * @mask: bitmask of bits in if_flags to check
790 * Search for any interface with the given flags. Returns NULL if a device
791 * is not found or a pointer to the device. The device returned has
792 * had a reference added and the pointer is safe until the user calls
793 * dev_put to indicate they have finished with it.
796 struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
797 unsigned short mask)
799 struct net_device *dev, *ret;
801 ret = NULL;
802 rcu_read_lock();
803 for_each_netdev_rcu(net, dev) {
804 if (((dev->flags ^ if_flags) & mask) == 0) {
805 dev_hold(dev);
806 ret = dev;
807 break;
810 rcu_read_unlock();
811 return ret;
813 EXPORT_SYMBOL(dev_get_by_flags);
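/*
 * Illustrative sketch (editorial): to find any interface that is
 * administratively up, pass IFF_UP in both @if_flags and @mask:
 *
 *	dev = dev_get_by_flags(net, IFF_UP, IFF_UP);
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */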
816 * dev_valid_name - check if name is okay for network device
817 * @name: name string
819 * Network device names need to be valid file names to
820 * allow sysfs to work. We also disallow any kind of
821 * whitespace.
823 int dev_valid_name(const char *name)
825 if (*name == '\0')
826 return 0;
827 if (strlen(name) >= IFNAMSIZ)
828 return 0;
829 if (!strcmp(name, ".") || !strcmp(name, ".."))
830 return 0;
832 while (*name) {
833 if (*name == '/' || isspace(*name))
834 return 0;
835 name++;
837 return 1;
839 EXPORT_SYMBOL(dev_valid_name);
842 * __dev_alloc_name - allocate a name for a device
843 * @net: network namespace to allocate the device name in
844 * @name: name format string
845 * @buf: scratch buffer and result name string
847 * Passed a format string - eg "lt%d" - it will try and find a suitable
848 * id. It scans list of devices to build up a free map, then chooses
849 * the first empty slot. The caller must hold the dev_base or rtnl lock
850 * while allocating the name and adding the device in order to avoid
851 * duplicates.
852 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
853 * Returns the number of the unit assigned or a negative errno code.
856 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
858 int i = 0;
859 const char *p;
860 const int max_netdevices = 8*PAGE_SIZE;
861 unsigned long *inuse;
862 struct net_device *d;
864 p = strnchr(name, IFNAMSIZ-1, '%');
865 if (p) {
867 * Verify the string as this thing may have come from
868 * the user. There must be either one "%d" and no other "%"
869 * characters.
871 if (p[1] != 'd' || strchr(p + 2, '%'))
872 return -EINVAL;
874 /* Use one page as a bit array of possible slots */
875 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
876 if (!inuse)
877 return -ENOMEM;
879 for_each_netdev(net, d) {
880 if (!sscanf(d->name, name, &i))
881 continue;
882 if (i < 0 || i >= max_netdevices)
883 continue;
885 /* avoid cases where sscanf is not exact inverse of printf */
886 snprintf(buf, IFNAMSIZ, name, i);
887 if (!strncmp(buf, d->name, IFNAMSIZ))
888 set_bit(i, inuse);
891 i = find_first_zero_bit(inuse, max_netdevices);
892 free_page((unsigned long) inuse);
895 snprintf(buf, IFNAMSIZ, name, i);
896 if (!__dev_get_by_name(net, buf))
897 return i;
899 /* It is possible to run out of possible slots
900 * when the name is long and there isn't enough space left
901 * for the digits, or if all bits are used.
903 return -ENFILE;
907 * dev_alloc_name - allocate a name for a device
908 * @dev: device
909 * @name: name format string
911 * Passed a format string - eg "lt%d" - it will try and find a suitable
912 * id. It scans list of devices to build up a free map, then chooses
913 * the first empty slot. The caller must hold the dev_base or rtnl lock
914 * while allocating the name and adding the device in order to avoid
915 * duplicates.
916 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
917 * Returns the number of the unit assigned or a negative errno code.
920 int dev_alloc_name(struct net_device *dev, const char *name)
922 char buf[IFNAMSIZ];
923 struct net *net;
924 int ret;
926 BUG_ON(!dev_net(dev));
927 net = dev_net(dev);
928 ret = __dev_alloc_name(net, name, buf);
929 if (ret >= 0)
930 strlcpy(dev->name, buf, IFNAMSIZ);
931 return ret;
933 EXPORT_SYMBOL(dev_alloc_name);
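/*
 * Illustrative sketch (editorial): drivers usually pass a format such as
 * "eth%d" and let the core pick the first free unit number:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 *
 * On success dev->name holds e.g. "eth0" and err is the unit number.
 */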
937 * dev_change_name - change name of a device
938 * @dev: device
939 * @newname: name (or format string) must be at least IFNAMSIZ
941 * Change name of a device, can pass format strings "eth%d"
942 * for wildcarding.
944 int dev_change_name(struct net_device *dev, const char *newname)
946 char oldname[IFNAMSIZ];
947 int err = 0;
948 int ret;
949 struct net *net;
951 ASSERT_RTNL();
952 BUG_ON(!dev_net(dev));
954 net = dev_net(dev);
955 if (dev->flags & IFF_UP)
956 return -EBUSY;
958 if (!dev_valid_name(newname))
959 return -EINVAL;
961 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
962 return 0;
964 memcpy(oldname, dev->name, IFNAMSIZ);
966 if (strchr(newname, '%')) {
967 err = dev_alloc_name(dev, newname);
968 if (err < 0)
969 return err;
970 } else if (__dev_get_by_name(net, newname))
971 return -EEXIST;
972 else
973 strlcpy(dev->name, newname, IFNAMSIZ);
975 rollback:
976 /* For now only devices in the initial network namespace
977 * are in sysfs.
979 if (net == &init_net) {
980 ret = device_rename(&dev->dev, dev->name);
981 if (ret) {
982 memcpy(dev->name, oldname, IFNAMSIZ);
983 return ret;
987 write_lock_bh(&dev_base_lock);
988 hlist_del(&dev->name_hlist);
989 write_unlock_bh(&dev_base_lock);
991 synchronize_rcu();
993 write_lock_bh(&dev_base_lock);
994 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
995 write_unlock_bh(&dev_base_lock);
997 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
998 ret = notifier_to_errno(ret);
1000 if (ret) {
1001 if (err) {
1002 printk(KERN_ERR
1003 "%s: name change rollback failed: %d.\n",
1004 dev->name, ret);
1005 } else {
1006 err = ret;
1007 memcpy(dev->name, oldname, IFNAMSIZ);
1008 goto rollback;
1012 return err;
1016 * dev_set_alias - change ifalias of a device
1017 * @dev: device
1018 * @alias: name up to IFALIASZ
1019 * @len: limit of bytes to copy from info
1021 * Set ifalias for a device.
1023 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1025 ASSERT_RTNL();
1027 if (len >= IFALIASZ)
1028 return -EINVAL;
1030 if (!len) {
1031 if (dev->ifalias) {
1032 kfree(dev->ifalias);
1033 dev->ifalias = NULL;
1035 return 0;
1038 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1039 if (!dev->ifalias)
1040 return -ENOMEM;
1042 strlcpy(dev->ifalias, alias, len+1);
1043 return len;
1048 * netdev_features_change - device changes features
1049 * @dev: device to cause notification
1051 * Called to indicate a device has changed features.
1053 void netdev_features_change(struct net_device *dev)
1055 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1057 EXPORT_SYMBOL(netdev_features_change);
1060 * netdev_state_change - device changes state
1061 * @dev: device to cause notification
1063 * Called to indicate a device has changed state. This function calls
1064 * the notifier chains for netdev_chain and sends a NEWLINK message
1065 * to the routing socket.
1067 void netdev_state_change(struct net_device *dev)
1069 if (dev->flags & IFF_UP) {
1070 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1071 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1074 EXPORT_SYMBOL(netdev_state_change);
1076 void netdev_bonding_change(struct net_device *dev, unsigned long event)
1078 call_netdevice_notifiers(event, dev);
1080 EXPORT_SYMBOL(netdev_bonding_change);
1083 * dev_load - load a network module
1084 * @net: the applicable net namespace
1085 * @name: name of interface
1087 * If a network interface is not present and the process has suitable
1088 * privileges this function loads the module. If module loading is not
1089 * available in this kernel then it becomes a nop.
1092 void dev_load(struct net *net, const char *name)
1094 struct net_device *dev;
1096 rcu_read_lock();
1097 dev = dev_get_by_name_rcu(net, name);
1098 rcu_read_unlock();
1100 if (!dev && capable(CAP_NET_ADMIN))
1101 request_module("%s", name);
1103 EXPORT_SYMBOL(dev_load);
1106 * dev_open - prepare an interface for use.
1107 * @dev: device to open
1109 * Takes a device from down to up state. The device's private open
1110 * function is invoked and then the multicast lists are loaded. Finally
1111 * the device is moved into the up state and a %NETDEV_UP message is
1112 * sent to the netdev notifier chain.
1114 * Calling this function on an active interface is a nop. On a failure
1115 * a negative errno code is returned.
1117 int dev_open(struct net_device *dev)
1119 const struct net_device_ops *ops = dev->netdev_ops;
1120 int ret;
1122 ASSERT_RTNL();
1125 * Is it already up?
1128 if (dev->flags & IFF_UP)
1129 return 0;
1132 * Is it even present?
1134 if (!netif_device_present(dev))
1135 return -ENODEV;
1137 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1138 ret = notifier_to_errno(ret);
1139 if (ret)
1140 return ret;
1143 * Call device private open method
1145 set_bit(__LINK_STATE_START, &dev->state);
1147 if (ops->ndo_validate_addr)
1148 ret = ops->ndo_validate_addr(dev);
1150 if (!ret && ops->ndo_open)
1151 ret = ops->ndo_open(dev);
1154 * If it went open OK then:
1157 if (ret)
1158 clear_bit(__LINK_STATE_START, &dev->state);
1159 else {
1161 * Set the flags.
1163 dev->flags |= IFF_UP;
1166 * Enable NET_DMA
1168 net_dmaengine_get();
1171 * Initialize multicasting status
1173 dev_set_rx_mode(dev);
1176 * Wakeup transmit queue engine
1178 dev_activate(dev);
1181 * ... and announce new interface.
1183 call_netdevice_notifiers(NETDEV_UP, dev);
1186 return ret;
1188 EXPORT_SYMBOL(dev_open);
1191 * dev_close - shutdown an interface.
1192 * @dev: device to shutdown
1194 * This function moves an active device into down state. A
1195 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1196 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1197 * chain.
1199 int dev_close(struct net_device *dev)
1201 const struct net_device_ops *ops = dev->netdev_ops;
1202 ASSERT_RTNL();
1204 might_sleep();
1206 if (!(dev->flags & IFF_UP))
1207 return 0;
1210 * Tell people we are going down, so that they can
1211 * prepare for it while the device is still operating.
1213 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1215 clear_bit(__LINK_STATE_START, &dev->state);
1217 /* Synchronize to scheduled poll. We cannot touch poll list,
1218 * it can even be on a different cpu. So just clear netif_running().
1220 * dev->stop() will invoke napi_disable() on all of its
1221 * napi_struct instances on this device.
1223 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1225 dev_deactivate(dev);
1228 * Call the device specific close. This cannot fail.
1229 * Only if device is UP
1231 * We allow it to be called even after a DETACH hot-plug
1232 * event.
1234 if (ops->ndo_stop)
1235 ops->ndo_stop(dev);
1238 * Device is now down.
1241 dev->flags &= ~IFF_UP;
1244 * Tell people we are down
1246 call_netdevice_notifiers(NETDEV_DOWN, dev);
1249 * Shutdown NET_DMA
1251 net_dmaengine_put();
1253 return 0;
1255 EXPORT_SYMBOL(dev_close);
1259 * dev_disable_lro - disable Large Receive Offload on a device
1260 * @dev: device
1262 * Disable Large Receive Offload (LRO) on a net device. Must be
1263 * called under RTNL. This is needed if received packets may be
1264 * forwarded to another interface.
1266 void dev_disable_lro(struct net_device *dev)
1268 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1269 dev->ethtool_ops->set_flags) {
1270 u32 flags = dev->ethtool_ops->get_flags(dev);
1271 if (flags & ETH_FLAG_LRO) {
1272 flags &= ~ETH_FLAG_LRO;
1273 dev->ethtool_ops->set_flags(dev, flags);
1276 WARN_ON(dev->features & NETIF_F_LRO);
1278 EXPORT_SYMBOL(dev_disable_lro);
1281 static int dev_boot_phase = 1;
1284 * Device change register/unregister. These are not inline or static
1285 * as we export them to the world.
1289 * register_netdevice_notifier - register a network notifier block
1290 * @nb: notifier
1292 * Register a notifier to be called when network device events occur.
1293 * The notifier passed is linked into the kernel structures and must
1294 * not be reused until it has been unregistered. A negative errno code
1295 * is returned on a failure.
1297 * When registered, all registration and up events are replayed
1298 * to the new notifier to allow the device to have a race-free
1299 * view of the network device list.
1302 int register_netdevice_notifier(struct notifier_block *nb)
1304 struct net_device *dev;
1305 struct net_device *last;
1306 struct net *net;
1307 int err;
1309 rtnl_lock();
1310 err = raw_notifier_chain_register(&netdev_chain, nb);
1311 if (err)
1312 goto unlock;
1313 if (dev_boot_phase)
1314 goto unlock;
1315 for_each_net(net) {
1316 for_each_netdev(net, dev) {
1317 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1318 err = notifier_to_errno(err);
1319 if (err)
1320 goto rollback;
1322 if (!(dev->flags & IFF_UP))
1323 continue;
1325 nb->notifier_call(nb, NETDEV_UP, dev);
1329 unlock:
1330 rtnl_unlock();
1331 return err;
1333 rollback:
1334 last = dev;
1335 for_each_net(net) {
1336 for_each_netdev(net, dev) {
1337 if (dev == last)
1338 break;
1340 if (dev->flags & IFF_UP) {
1341 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1342 nb->notifier_call(nb, NETDEV_DOWN, dev);
1344 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1348 raw_notifier_chain_unregister(&netdev_chain, nb);
1349 goto unlock;
1351 EXPORT_SYMBOL(register_netdevice_notifier);
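/*
 * Illustrative sketch (editorial, not part of the original file): a
 * subsystem that wants to track device events registers a notifier_block;
 * the callback receives the net_device as the opaque pointer.
 * my_netdev_event() is a hypothetical name.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */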
1354 * unregister_netdevice_notifier - unregister a network notifier block
1355 * @nb: notifier
1357 * Unregister a notifier previously registered by
1358 * register_netdevice_notifier(). The notifier is unlinked from the
1359 * kernel structures and may then be reused. A negative errno code
1360 * is returned on a failure.
1363 int unregister_netdevice_notifier(struct notifier_block *nb)
1365 int err;
1367 rtnl_lock();
1368 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1369 rtnl_unlock();
1370 return err;
1372 EXPORT_SYMBOL(unregister_netdevice_notifier);
1375 * call_netdevice_notifiers - call all network notifier blocks
1376 * @val: value passed unmodified to notifier function
1377 * @dev: net_device pointer passed unmodified to notifier function
1379 * Call all network notifier blocks. Parameters and return value
1380 * are as for raw_notifier_call_chain().
1383 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1385 return raw_notifier_call_chain(&netdev_chain, val, dev);
1388 /* When > 0 there are consumers of rx skb time stamps */
1389 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1391 void net_enable_timestamp(void)
1393 atomic_inc(&netstamp_needed);
1395 EXPORT_SYMBOL(net_enable_timestamp);
1397 void net_disable_timestamp(void)
1399 atomic_dec(&netstamp_needed);
1401 EXPORT_SYMBOL(net_disable_timestamp);
1403 static inline void net_timestamp(struct sk_buff *skb)
1405 if (atomic_read(&netstamp_needed))
1406 __net_timestamp(skb);
1407 else
1408 skb->tstamp.tv64 = 0;
1412 * Support routine. Sends outgoing frames to any network
1413 * taps currently in use.
1416 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1418 struct packet_type *ptype;
1420 #ifdef CONFIG_NET_CLS_ACT
1421 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1422 net_timestamp(skb);
1423 #else
1424 net_timestamp(skb);
1425 #endif
1427 rcu_read_lock();
1428 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1429 /* Never send packets back to the socket
1430 * they originated from - MvS (miquels@drinkel.ow.org)
1432 if ((ptype->dev == dev || !ptype->dev) &&
1433 (ptype->af_packet_priv == NULL ||
1434 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1435 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1436 if (!skb2)
1437 break;
1439 /* skb->nh should be correctly
1440 set by sender, so that the second statement is
1441 just protection against buggy protocols.
1443 skb_reset_mac_header(skb2);
1445 if (skb_network_header(skb2) < skb2->data ||
1446 skb2->network_header > skb2->tail) {
1447 if (net_ratelimit())
1448 printk(KERN_CRIT "protocol %04x is "
1449 "buggy, dev %s\n",
1450 skb2->protocol, dev->name);
1451 skb_reset_network_header(skb2);
1454 skb2->transport_header = skb2->network_header;
1455 skb2->pkt_type = PACKET_OUTGOING;
1456 ptype->func(skb2, skb->dev, ptype, skb->dev);
1459 rcu_read_unlock();
1463 static inline void __netif_reschedule(struct Qdisc *q)
1465 struct softnet_data *sd;
1466 unsigned long flags;
1468 local_irq_save(flags);
1469 sd = &__get_cpu_var(softnet_data);
1470 q->next_sched = sd->output_queue;
1471 sd->output_queue = q;
1472 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1473 local_irq_restore(flags);
1476 void __netif_schedule(struct Qdisc *q)
1478 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1479 __netif_reschedule(q);
1481 EXPORT_SYMBOL(__netif_schedule);
1483 void dev_kfree_skb_irq(struct sk_buff *skb)
1485 if (atomic_dec_and_test(&skb->users)) {
1486 struct softnet_data *sd;
1487 unsigned long flags;
1489 local_irq_save(flags);
1490 sd = &__get_cpu_var(softnet_data);
1491 skb->next = sd->completion_queue;
1492 sd->completion_queue = skb;
1493 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1494 local_irq_restore(flags);
1497 EXPORT_SYMBOL(dev_kfree_skb_irq);
1499 void dev_kfree_skb_any(struct sk_buff *skb)
1501 if (in_irq() || irqs_disabled())
1502 dev_kfree_skb_irq(skb);
1503 else
1504 dev_kfree_skb(skb);
1506 EXPORT_SYMBOL(dev_kfree_skb_any);
1510 * netif_device_detach - mark device as removed
1511 * @dev: network device
1513 * Mark device as removed from system and therefore no longer available.
1515 void netif_device_detach(struct net_device *dev)
1517 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1518 netif_running(dev)) {
1519 netif_tx_stop_all_queues(dev);
1522 EXPORT_SYMBOL(netif_device_detach);
1525 * netif_device_attach - mark device as attached
1526 * @dev: network device
1528 * Mark device as attached to the system and restart it if needed.
1530 void netif_device_attach(struct net_device *dev)
1532 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1533 netif_running(dev)) {
1534 netif_tx_wake_all_queues(dev);
1535 __netdev_watchdog_up(dev);
1538 EXPORT_SYMBOL(netif_device_attach);
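/*
 * Editorial note: drivers typically pair these two helpers in their
 * power-management hooks, e.g. netif_device_detach(dev) in ->suspend()
 * and netif_device_attach(dev) in ->resume().
 */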
1540 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1542 return ((features & NETIF_F_GEN_CSUM) ||
1543 ((features & NETIF_F_IP_CSUM) &&
1544 protocol == htons(ETH_P_IP)) ||
1545 ((features & NETIF_F_IPV6_CSUM) &&
1546 protocol == htons(ETH_P_IPV6)) ||
1547 ((features & NETIF_F_FCOE_CRC) &&
1548 protocol == htons(ETH_P_FCOE)));
1551 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1553 if (can_checksum_protocol(dev->features, skb->protocol))
1554 return true;
1556 if (skb->protocol == htons(ETH_P_8021Q)) {
1557 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1558 if (can_checksum_protocol(dev->features & dev->vlan_features,
1559 veh->h_vlan_encapsulated_proto))
1560 return true;
1563 return false;
1567 * Invalidate hardware checksum when packet is to be mangled, and
1568 * complete checksum manually on outgoing path.
1570 int skb_checksum_help(struct sk_buff *skb)
1572 __wsum csum;
1573 int ret = 0, offset;
1575 if (skb->ip_summed == CHECKSUM_COMPLETE)
1576 goto out_set_summed;
1578 if (unlikely(skb_shinfo(skb)->gso_size)) {
1579 /* Let GSO fix up the checksum. */
1580 goto out_set_summed;
1583 offset = skb->csum_start - skb_headroom(skb);
1584 BUG_ON(offset >= skb_headlen(skb));
1585 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1587 offset += skb->csum_offset;
1588 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1590 if (skb_cloned(skb) &&
1591 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1592 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1593 if (ret)
1594 goto out;
1597 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1598 out_set_summed:
1599 skb->ip_summed = CHECKSUM_NONE;
1600 out:
1601 return ret;
1603 EXPORT_SYMBOL(skb_checksum_help);
1606 * skb_gso_segment - Perform segmentation on skb.
1607 * @skb: buffer to segment
1608 * @features: features for the output path (see dev->features)
1610 * This function segments the given skb and returns a list of segments.
1612 * It may return NULL if the skb requires no segmentation. This is
1613 * only possible when GSO is used for verifying header integrity.
1615 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1617 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1618 struct packet_type *ptype;
1619 __be16 type = skb->protocol;
1620 int err;
1622 skb_reset_mac_header(skb);
1623 skb->mac_len = skb->network_header - skb->mac_header;
1624 __skb_pull(skb, skb->mac_len);
1626 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1627 struct net_device *dev = skb->dev;
1628 struct ethtool_drvinfo info = {};
1630 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1631 dev->ethtool_ops->get_drvinfo(dev, &info);
1633 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1634 "ip_summed=%d",
1635 info.driver, dev ? dev->features : 0L,
1636 skb->sk ? skb->sk->sk_route_caps : 0L,
1637 skb->len, skb->data_len, skb->ip_summed);
1639 if (skb_header_cloned(skb) &&
1640 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1641 return ERR_PTR(err);
1644 rcu_read_lock();
1645 list_for_each_entry_rcu(ptype,
1646 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1647 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1648 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1649 err = ptype->gso_send_check(skb);
1650 segs = ERR_PTR(err);
1651 if (err || skb_gso_ok(skb, features))
1652 break;
1653 __skb_push(skb, (skb->data -
1654 skb_network_header(skb)));
1656 segs = ptype->gso_segment(skb, features);
1657 break;
1660 rcu_read_unlock();
1662 __skb_push(skb, skb->data - skb_mac_header(skb));
1664 return segs;
1666 EXPORT_SYMBOL(skb_gso_segment);
1668 /* Take action when hardware reception checksum errors are detected. */
1669 #ifdef CONFIG_BUG
1670 void netdev_rx_csum_fault(struct net_device *dev)
1672 if (net_ratelimit()) {
1673 printk(KERN_ERR "%s: hw csum failure.\n",
1674 dev ? dev->name : "<unknown>");
1675 dump_stack();
1678 EXPORT_SYMBOL(netdev_rx_csum_fault);
1679 #endif
1681 /* Actually, we should eliminate this check as soon as we know that:
1682 * 1. An IOMMU is present and allows us to map all the memory.
1683 * 2. No high memory really exists on this machine.
1686 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1688 #ifdef CONFIG_HIGHMEM
1689 int i;
1691 if (dev->features & NETIF_F_HIGHDMA)
1692 return 0;
1694 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1695 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1696 return 1;
1698 #endif
1699 return 0;
1702 struct dev_gso_cb {
1703 void (*destructor)(struct sk_buff *skb);
1706 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1708 static void dev_gso_skb_destructor(struct sk_buff *skb)
1710 struct dev_gso_cb *cb;
1712 do {
1713 struct sk_buff *nskb = skb->next;
1715 skb->next = nskb->next;
1716 nskb->next = NULL;
1717 kfree_skb(nskb);
1718 } while (skb->next);
1720 cb = DEV_GSO_CB(skb);
1721 if (cb->destructor)
1722 cb->destructor(skb);
1726 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1727 * @skb: buffer to segment
1729 * This function segments the given skb and stores the list of segments
1730 * in skb->next.
1732 static int dev_gso_segment(struct sk_buff *skb)
1734 struct net_device *dev = skb->dev;
1735 struct sk_buff *segs;
1736 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1737 NETIF_F_SG : 0);
1739 segs = skb_gso_segment(skb, features);
1741 /* Verifying header integrity only. */
1742 if (!segs)
1743 return 0;
1745 if (IS_ERR(segs))
1746 return PTR_ERR(segs);
1748 skb->next = segs;
1749 DEV_GSO_CB(skb)->destructor = skb->destructor;
1750 skb->destructor = dev_gso_skb_destructor;
1752 return 0;
1755 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1756 struct netdev_queue *txq)
1758 const struct net_device_ops *ops = dev->netdev_ops;
1759 int rc;
1761 if (likely(!skb->next)) {
1762 if (!list_empty(&ptype_all))
1763 dev_queue_xmit_nit(skb, dev);
1765 if (netif_needs_gso(dev, skb)) {
1766 if (unlikely(dev_gso_segment(skb)))
1767 goto out_kfree_skb;
1768 if (skb->next)
1769 goto gso;
1773 * If the device doesn't need skb->dst, release it right now while
1774 * it's hot in this cpu cache
1776 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1777 skb_dst_drop(skb);
1779 rc = ops->ndo_start_xmit(skb, dev);
1780 if (rc == NETDEV_TX_OK)
1781 txq_trans_update(txq);
1783 * TODO: if skb_orphan() was called by
1784 * dev->hard_start_xmit() (for example, the unmodified
1785 * igb driver does that; bnx2 doesn't), then
1786 * skb_tx_software_timestamp() will be unable to send
1787 * back the time stamp.
1789 * How can this be prevented? Always create another
1790 * reference to the socket before calling
1791 * dev->hard_start_xmit()? Prevent that skb_orphan()
1792 * does anything in dev->hard_start_xmit() by clearing
1793 * the skb destructor before the call and restoring it
1794 * afterwards, then doing the skb_orphan() ourselves?
1796 return rc;
1799 gso:
1800 do {
1801 struct sk_buff *nskb = skb->next;
1803 skb->next = nskb->next;
1804 nskb->next = NULL;
1805 rc = ops->ndo_start_xmit(nskb, dev);
1806 if (unlikely(rc != NETDEV_TX_OK)) {
1807 nskb->next = skb->next;
1808 skb->next = nskb;
1809 return rc;
1811 txq_trans_update(txq);
1812 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1813 return NETDEV_TX_BUSY;
1814 } while (skb->next);
1816 skb->destructor = DEV_GSO_CB(skb)->destructor;
1818 out_kfree_skb:
1819 kfree_skb(skb);
1820 return NETDEV_TX_OK;
1823 static u32 skb_tx_hashrnd;
1825 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1827 u32 hash;
1829 if (skb_rx_queue_recorded(skb)) {
1830 hash = skb_get_rx_queue(skb);
1831 while (unlikely(hash >= dev->real_num_tx_queues))
1832 hash -= dev->real_num_tx_queues;
1833 return hash;
1836 if (skb->sk && skb->sk->sk_hash)
1837 hash = skb->sk->sk_hash;
1838 else
1839 hash = skb->protocol;
1841 hash = jhash_1word(hash, skb_tx_hashrnd);
1843 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1845 EXPORT_SYMBOL(skb_tx_hash);
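/*
 * Example (editorial): the final multiply-and-shift maps the 32-bit hash
 * uniformly onto [0, real_num_tx_queues) without a modulo; with 8 tx
 * queues a hash of 0x80000000 gives ((u64)0x80000000 * 8) >> 32 = 4.
 */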
1847 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1848 struct sk_buff *skb)
1850 u16 queue_index;
1851 struct sock *sk = skb->sk;
1853 if (sk_tx_queue_recorded(sk)) {
1854 queue_index = sk_tx_queue_get(sk);
1855 } else {
1856 const struct net_device_ops *ops = dev->netdev_ops;
1858 if (ops->ndo_select_queue) {
1859 queue_index = ops->ndo_select_queue(dev, skb);
1860 } else {
1861 queue_index = 0;
1862 if (dev->real_num_tx_queues > 1)
1863 queue_index = skb_tx_hash(dev, skb);
1865 if (sk && sk->sk_dst_cache)
1866 sk_tx_queue_set(sk, queue_index);
1870 skb_set_queue_mapping(skb, queue_index);
1871 return netdev_get_tx_queue(dev, queue_index);
1874 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1875 struct net_device *dev,
1876 struct netdev_queue *txq)
1878 spinlock_t *root_lock = qdisc_lock(q);
1879 int rc;
1881 spin_lock(root_lock);
1882 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1883 kfree_skb(skb);
1884 rc = NET_XMIT_DROP;
1885 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1886 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1888 * This is a work-conserving queue; there are no old skbs
1889 * waiting to be sent out; and the qdisc is not running -
1890 * xmit the skb directly.
1892 __qdisc_update_bstats(q, skb->len);
1893 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1894 __qdisc_run(q);
1895 else
1896 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1898 rc = NET_XMIT_SUCCESS;
1899 } else {
1900 rc = qdisc_enqueue_root(skb, q);
1901 qdisc_run(q);
1903 spin_unlock(root_lock);
1905 return rc;
1909 * dev_queue_xmit - transmit a buffer
1910 * @skb: buffer to transmit
1912 * Queue a buffer for transmission to a network device. The caller must
1913 * have set the device and priority and built the buffer before calling
1914 * this function. The function can be called from an interrupt.
1916 * A negative errno code is returned on a failure. A success does not
1917 * guarantee the frame will be transmitted as it may be dropped due
1918 * to congestion or traffic shaping.
1920 * -----------------------------------------------------------------------------------
1921 * I notice this method can also return errors from the queue disciplines,
1922 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1923 * be positive.
1925 * Regardless of the return value, the skb is consumed, so it is currently
1926 * difficult to retry a send to this method. (You can bump the ref count
1927 * before sending to hold a reference for retry if you are careful.)
1929 * When calling this method, interrupts MUST be enabled. This is because
1930 * the BH enable code must have IRQs enabled so that it will not deadlock.
1931 * --BLG
1933 int dev_queue_xmit(struct sk_buff *skb)
1935 struct net_device *dev = skb->dev;
1936 struct netdev_queue *txq;
1937 struct Qdisc *q;
1938 int rc = -ENOMEM;
1940 /* GSO will handle the following emulations directly. */
1941 if (netif_needs_gso(dev, skb))
1942 goto gso;
1944 if (skb_has_frags(skb) &&
1945 !(dev->features & NETIF_F_FRAGLIST) &&
1946 __skb_linearize(skb))
1947 goto out_kfree_skb;
1949 /* Fragmented skb is linearized if device does not support SG,
1950 * or if at least one of fragments is in highmem and device
1951 * does not support DMA from it.
1953 if (skb_shinfo(skb)->nr_frags &&
1954 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1955 __skb_linearize(skb))
1956 goto out_kfree_skb;
1958 /* If packet is not checksummed and device does not support
1959 * checksumming for this protocol, complete checksumming here.
1961 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1962 skb_set_transport_header(skb, skb->csum_start -
1963 skb_headroom(skb));
1964 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1965 goto out_kfree_skb;
1968 gso:
1969 /* Disable soft irqs for various locks below. Also
1970 * stops preemption for RCU.
1972 rcu_read_lock_bh();
1974 txq = dev_pick_tx(dev, skb);
1975 q = rcu_dereference(txq->qdisc);
1977 #ifdef CONFIG_NET_CLS_ACT
1978 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1979 #endif
1980 if (q->enqueue) {
1981 rc = __dev_xmit_skb(skb, q, dev, txq);
1982 goto out;
1985 /* The device has no queue. Common case for software devices:
1986 loopback, all the sorts of tunnels...
1988 Really, it is unlikely that netif_tx_lock protection is necessary
1989 here. (f.e. loopback and IP tunnels are clean ignoring statistics
1990 counters.)
1991 However, it is possible that they rely on the protection
1992 we provide here.
1994 Check this and take the lock. It is not prone to deadlocks.
1995 Either shot noqueue qdisc, it is even simpler 8)
1997 if (dev->flags & IFF_UP) {
1998 int cpu = smp_processor_id(); /* ok because BHs are off */
2000 if (txq->xmit_lock_owner != cpu) {
2002 HARD_TX_LOCK(dev, txq, cpu);
2004 if (!netif_tx_queue_stopped(txq)) {
2005 rc = NET_XMIT_SUCCESS;
2006 if (!dev_hard_start_xmit(skb, dev, txq)) {
2007 HARD_TX_UNLOCK(dev, txq);
2008 goto out;
2011 HARD_TX_UNLOCK(dev, txq);
2012 if (net_ratelimit())
2013 printk(KERN_CRIT "Virtual device %s asks to "
2014 "queue packet!\n", dev->name);
2015 } else {
2016 /* Recursion is detected! It is possible,
2017 * unfortunately */
2018 if (net_ratelimit())
2019 printk(KERN_CRIT "Dead loop on virtual device "
2020 "%s, fix it urgently!\n", dev->name);
2024 rc = -ENETDOWN;
2025 rcu_read_unlock_bh();
2027 out_kfree_skb:
2028 kfree_skb(skb);
2029 return rc;
2030 out:
2031 rcu_read_unlock_bh();
2032 return rc;
2034 EXPORT_SYMBOL(dev_queue_xmit);
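/*
 * Illustrative sketch (editorial, not part of the original file): a caller
 * builds the frame, sets the outgoing device and hands the skb over; the
 * skb is consumed whatever the return value is.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	rc = dev_queue_xmit(skb);
 *
 * rc may be a negative errno or a positive NET_XMIT_* code as noted in
 * the comment above; the skb must not be touched afterwards.
 */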
2037 /*=======================================================================
2038 Receiver routines
2039 =======================================================================*/
2041 int netdev_max_backlog __read_mostly = 1000;
2042 int netdev_budget __read_mostly = 300;
2043 int weight_p __read_mostly = 64; /* old backlog weight */
2045 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2049 * netif_rx - post buffer to the network code
2050 * @skb: buffer to post
2052 * This function receives a packet from a device driver and queues it for
2053 * the upper (protocol) levels to process. It always succeeds. The buffer
2054 * may be dropped during processing for congestion control or by the
2055 * protocol layers.
2057 * return values:
2058 * NET_RX_SUCCESS (no congestion)
2059 * NET_RX_DROP (packet was dropped)
2063 int netif_rx(struct sk_buff *skb)
2065 struct softnet_data *queue;
2066 unsigned long flags;
2068 /* if netpoll wants it, pretend we never saw it */
2069 if (netpoll_rx(skb))
2070 return NET_RX_DROP;
2072 if (!skb->tstamp.tv64)
2073 net_timestamp(skb);
2076 * The code is rearranged so that the path is shortest
2077 * when the CPU is congested but still operating.
2079 local_irq_save(flags);
2080 queue = &__get_cpu_var(softnet_data);
2082 __get_cpu_var(netdev_rx_stat).total++;
2083 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2084 if (queue->input_pkt_queue.qlen) {
2085 enqueue:
2086 __skb_queue_tail(&queue->input_pkt_queue, skb);
2087 local_irq_restore(flags);
2088 return NET_RX_SUCCESS;
2091 napi_schedule(&queue->backlog);
2092 goto enqueue;
2095 __get_cpu_var(netdev_rx_stat).dropped++;
2096 local_irq_restore(flags);
2098 kfree_skb(skb);
2099 return NET_RX_DROP;
2101 EXPORT_SYMBOL(netif_rx);
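/*
 * Illustrative usage sketch: a non-NAPI driver feeds received frames to the
 * stack with netif_rx() from its interrupt handler.  Names prefixed with
 * "example_" are hypothetical.
 *
 *	static irqreturn_t example_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = example_rx_frame(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 *
 * From process context (tunnels, test code) use netif_rx_ni() below instead,
 * so that any softirq raised by the enqueue gets a chance to run.
 */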
2103 int netif_rx_ni(struct sk_buff *skb)
2105 int err;
2107 preempt_disable();
2108 err = netif_rx(skb);
2109 if (local_softirq_pending())
2110 do_softirq();
2111 preempt_enable();
2113 return err;
2115 EXPORT_SYMBOL(netif_rx_ni);
2117 static void net_tx_action(struct softirq_action *h)
2119 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2121 if (sd->completion_queue) {
2122 struct sk_buff *clist;
2124 local_irq_disable();
2125 clist = sd->completion_queue;
2126 sd->completion_queue = NULL;
2127 local_irq_enable();
2129 while (clist) {
2130 struct sk_buff *skb = clist;
2131 clist = clist->next;
2133 WARN_ON(atomic_read(&skb->users));
2134 __kfree_skb(skb);
2138 if (sd->output_queue) {
2139 struct Qdisc *head;
2141 local_irq_disable();
2142 head = sd->output_queue;
2143 sd->output_queue = NULL;
2144 local_irq_enable();
2146 while (head) {
2147 struct Qdisc *q = head;
2148 spinlock_t *root_lock;
2150 head = head->next_sched;
2152 root_lock = qdisc_lock(q);
2153 if (spin_trylock(root_lock)) {
2154 smp_mb__before_clear_bit();
2155 clear_bit(__QDISC_STATE_SCHED,
2156 &q->state);
2157 qdisc_run(q);
2158 spin_unlock(root_lock);
2159 } else {
2160 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2161 &q->state)) {
2162 __netif_reschedule(q);
2163 } else {
2164 smp_mb__before_clear_bit();
2165 clear_bit(__QDISC_STATE_SCHED,
2166 &q->state);
2173 static inline int deliver_skb(struct sk_buff *skb,
2174 struct packet_type *pt_prev,
2175 struct net_device *orig_dev)
2177 atomic_inc(&skb->users);
2178 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2181 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2183 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2184 /* This hook is defined here for ATM LANE */
2185 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2186 unsigned char *addr) __read_mostly;
2187 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2188 #endif
2191 * If bridge module is loaded call bridging hook.
2192 * returns NULL if packet was consumed.
2194 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2195 struct sk_buff *skb) __read_mostly;
2196 EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2198 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2199 struct packet_type **pt_prev, int *ret,
2200 struct net_device *orig_dev)
2202 struct net_bridge_port *port;
2204 if (skb->pkt_type == PACKET_LOOPBACK ||
2205 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2206 return skb;
2208 if (*pt_prev) {
2209 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2210 *pt_prev = NULL;
2213 return br_handle_frame_hook(port, skb);
2215 #else
2216 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2217 #endif
2219 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2220 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2221 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2223 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2224 struct packet_type **pt_prev,
2225 int *ret,
2226 struct net_device *orig_dev)
2228 if (skb->dev->macvlan_port == NULL)
2229 return skb;
2231 if (*pt_prev) {
2232 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2233 *pt_prev = NULL;
2235 return macvlan_handle_frame_hook(skb);
2237 #else
2238 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2239 #endif
2241 #ifdef CONFIG_NET_CLS_ACT
2242 /* TODO: Maybe we should just force sch_ingress to be compiled in
2243 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
2244 * instructions (a compare and two extra stores) when ingress is not
2245 * enabled but CONFIG_NET_CLS_ACT is.
2246 * NOTE: This doesn't remove any functionality; if you don't have
2247 * the ingress scheduler, you just can't add policies on ingress.
2250 static int ing_filter(struct sk_buff *skb)
2252 struct net_device *dev = skb->dev;
2253 u32 ttl = G_TC_RTTL(skb->tc_verd);
2254 struct netdev_queue *rxq;
2255 int result = TC_ACT_OK;
2256 struct Qdisc *q;
2258 if (MAX_RED_LOOP < ttl++) {
2259 printk(KERN_WARNING
2260 "Redir loop detected Dropping packet (%d->%d)\n",
2261 skb->iif, dev->ifindex);
2262 return TC_ACT_SHOT;
2265 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2266 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2268 rxq = &dev->rx_queue;
2270 q = rxq->qdisc;
2271 if (q != &noop_qdisc) {
2272 spin_lock(qdisc_lock(q));
2273 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2274 result = qdisc_enqueue_root(skb, q);
2275 spin_unlock(qdisc_lock(q));
2278 return result;
2281 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2282 struct packet_type **pt_prev,
2283 int *ret, struct net_device *orig_dev)
2285 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2286 goto out;
2288 if (*pt_prev) {
2289 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2290 *pt_prev = NULL;
2291 } else {
2292 /* Huh? Why does turning on AF_PACKET affect this? */
2293 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2296 switch (ing_filter(skb)) {
2297 case TC_ACT_SHOT:
2298 case TC_ACT_STOLEN:
2299 kfree_skb(skb);
2300 return NULL;
2303 out:
2304 skb->tc_verd = 0;
2305 return skb;
2307 #endif
2310 * netif_nit_deliver - deliver received packets to network taps
2311 * @skb: buffer
2313 * This function is used to deliver incoming packets to network
2314 * taps. It should be used when the normal netif_receive_skb path
2315 * is bypassed, for example because of VLAN acceleration.
2317 void netif_nit_deliver(struct sk_buff *skb)
2319 struct packet_type *ptype;
2321 if (list_empty(&ptype_all))
2322 return;
2324 skb_reset_network_header(skb);
2325 skb_reset_transport_header(skb);
2326 skb->mac_len = skb->network_header - skb->mac_header;
2328 rcu_read_lock();
2329 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2330 if (!ptype->dev || ptype->dev == skb->dev)
2331 deliver_skb(skb, ptype, skb->dev);
2333 rcu_read_unlock();
2337 * netif_receive_skb - process receive buffer from network
2338 * @skb: buffer to process
2340 * netif_receive_skb() is the main receive data processing function.
2341 * It always succeeds. The buffer may be dropped during processing
2342 * for congestion control or by the protocol layers.
2344 * This function may only be called from softirq context and interrupts
2345 * should be enabled.
2347 * Return values (usually ignored):
2348 * NET_RX_SUCCESS: no congestion
2349 * NET_RX_DROP: packet was dropped
2351 int netif_receive_skb(struct sk_buff *skb)
2353 struct packet_type *ptype, *pt_prev;
2354 struct net_device *orig_dev;
2355 struct net_device *null_or_orig;
2356 int ret = NET_RX_DROP;
2357 __be16 type;
2359 if (!skb->tstamp.tv64)
2360 net_timestamp(skb);
2362 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2363 return NET_RX_SUCCESS;
2365 /* if we've gotten here through NAPI, check netpoll */
2366 if (netpoll_receive_skb(skb))
2367 return NET_RX_DROP;
2369 if (!skb->iif)
2370 skb->iif = skb->dev->ifindex;
2372 null_or_orig = NULL;
2373 orig_dev = skb->dev;
2374 if (orig_dev->master) {
2375 if (skb_bond_should_drop(skb))
2376 null_or_orig = orig_dev; /* deliver only exact match */
2377 else
2378 skb->dev = orig_dev->master;
2381 __get_cpu_var(netdev_rx_stat).total++;
2383 skb_reset_network_header(skb);
2384 skb_reset_transport_header(skb);
2385 skb->mac_len = skb->network_header - skb->mac_header;
2387 pt_prev = NULL;
2389 rcu_read_lock();
2391 #ifdef CONFIG_NET_CLS_ACT
2392 if (skb->tc_verd & TC_NCLS) {
2393 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2394 goto ncls;
2396 #endif
2398 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2399 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2400 ptype->dev == orig_dev) {
2401 if (pt_prev)
2402 ret = deliver_skb(skb, pt_prev, orig_dev);
2403 pt_prev = ptype;
2407 #ifdef CONFIG_NET_CLS_ACT
2408 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2409 if (!skb)
2410 goto out;
2411 ncls:
2412 #endif
2414 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2415 if (!skb)
2416 goto out;
2417 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2418 if (!skb)
2419 goto out;
2421 type = skb->protocol;
2422 list_for_each_entry_rcu(ptype,
2423 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2424 if (ptype->type == type &&
2425 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2426 ptype->dev == orig_dev)) {
2427 if (pt_prev)
2428 ret = deliver_skb(skb, pt_prev, orig_dev);
2429 pt_prev = ptype;
2433 if (pt_prev) {
2434 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2435 } else {
2436 kfree_skb(skb);
2437 /* Jamal, now you will not be able to escape explaining
2438 * to me how you were going to use this. :-)
2440 ret = NET_RX_DROP;
2443 out:
2444 rcu_read_unlock();
2445 return ret;
2447 EXPORT_SYMBOL(netif_receive_skb);
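/*
 * Illustrative usage sketch: a NAPI driver delivers frames from its ->poll()
 * callback with netif_receive_skb(), which runs in softirq context with
 * interrupts enabled.  Names prefixed with "example_" are hypothetical.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *		struct sk_buff *skb;
 *
 *		while (work < budget && (skb = example_next_rx(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			example_enable_rx_irq(napi->dev);
 *		}
 *		return work;
 *	}
 */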
2449 /* Network device is going away, flush any packets still pending */
2450 static void flush_backlog(void *arg)
2452 struct net_device *dev = arg;
2453 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2454 struct sk_buff *skb, *tmp;
2456 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2457 if (skb->dev == dev) {
2458 __skb_unlink(skb, &queue->input_pkt_queue);
2459 kfree_skb(skb);
2463 static int napi_gro_complete(struct sk_buff *skb)
2465 struct packet_type *ptype;
2466 __be16 type = skb->protocol;
2467 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2468 int err = -ENOENT;
2470 if (NAPI_GRO_CB(skb)->count == 1) {
2471 skb_shinfo(skb)->gso_size = 0;
2472 goto out;
2475 rcu_read_lock();
2476 list_for_each_entry_rcu(ptype, head, list) {
2477 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2478 continue;
2480 err = ptype->gro_complete(skb);
2481 break;
2483 rcu_read_unlock();
2485 if (err) {
2486 WARN_ON(&ptype->list == head);
2487 kfree_skb(skb);
2488 return NET_RX_SUCCESS;
2491 out:
2492 return netif_receive_skb(skb);
2495 void napi_gro_flush(struct napi_struct *napi)
2497 struct sk_buff *skb, *next;
2499 for (skb = napi->gro_list; skb; skb = next) {
2500 next = skb->next;
2501 skb->next = NULL;
2502 napi_gro_complete(skb);
2505 napi->gro_count = 0;
2506 napi->gro_list = NULL;
2508 EXPORT_SYMBOL(napi_gro_flush);
2510 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2512 struct sk_buff **pp = NULL;
2513 struct packet_type *ptype;
2514 __be16 type = skb->protocol;
2515 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2516 int same_flow;
2517 int mac_len;
2518 enum gro_result ret;
2520 if (!(skb->dev->features & NETIF_F_GRO))
2521 goto normal;
2523 if (skb_is_gso(skb) || skb_has_frags(skb))
2524 goto normal;
2526 rcu_read_lock();
2527 list_for_each_entry_rcu(ptype, head, list) {
2528 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2529 continue;
2531 skb_set_network_header(skb, skb_gro_offset(skb));
2532 mac_len = skb->network_header - skb->mac_header;
2533 skb->mac_len = mac_len;
2534 NAPI_GRO_CB(skb)->same_flow = 0;
2535 NAPI_GRO_CB(skb)->flush = 0;
2536 NAPI_GRO_CB(skb)->free = 0;
2538 pp = ptype->gro_receive(&napi->gro_list, skb);
2539 break;
2541 rcu_read_unlock();
2543 if (&ptype->list == head)
2544 goto normal;
2546 same_flow = NAPI_GRO_CB(skb)->same_flow;
2547 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2549 if (pp) {
2550 struct sk_buff *nskb = *pp;
2552 *pp = nskb->next;
2553 nskb->next = NULL;
2554 napi_gro_complete(nskb);
2555 napi->gro_count--;
2558 if (same_flow)
2559 goto ok;
2561 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2562 goto normal;
2564 napi->gro_count++;
2565 NAPI_GRO_CB(skb)->count = 1;
2566 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2567 skb->next = napi->gro_list;
2568 napi->gro_list = skb;
2569 ret = GRO_HELD;
2571 pull:
2572 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2573 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2575 BUG_ON(skb->end - skb->tail < grow);
2577 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2579 skb->tail += grow;
2580 skb->data_len -= grow;
2582 skb_shinfo(skb)->frags[0].page_offset += grow;
2583 skb_shinfo(skb)->frags[0].size -= grow;
2585 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2586 put_page(skb_shinfo(skb)->frags[0].page);
2587 memmove(skb_shinfo(skb)->frags,
2588 skb_shinfo(skb)->frags + 1,
2589 --skb_shinfo(skb)->nr_frags);
2594 return ret;
2596 normal:
2597 ret = GRO_NORMAL;
2598 goto pull;
2600 EXPORT_SYMBOL(dev_gro_receive);
2602 static gro_result_t
2603 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2605 struct sk_buff *p;
2607 if (netpoll_rx_on(skb))
2608 return GRO_NORMAL;
2610 for (p = napi->gro_list; p; p = p->next) {
2611 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2612 && !compare_ether_header(skb_mac_header(p),
2613 skb_gro_mac_header(skb));
2614 NAPI_GRO_CB(p)->flush = 0;
2617 return dev_gro_receive(napi, skb);
2620 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2622 switch (ret) {
2623 case GRO_NORMAL:
2624 if (netif_receive_skb(skb))
2625 ret = GRO_DROP;
2626 break;
2628 case GRO_DROP:
2629 case GRO_MERGED_FREE:
2630 kfree_skb(skb);
2631 break;
2633 case GRO_HELD:
2634 case GRO_MERGED:
2635 break;
2638 return ret;
2640 EXPORT_SYMBOL(napi_skb_finish);
2642 void skb_gro_reset_offset(struct sk_buff *skb)
2644 NAPI_GRO_CB(skb)->data_offset = 0;
2645 NAPI_GRO_CB(skb)->frag0 = NULL;
2646 NAPI_GRO_CB(skb)->frag0_len = 0;
2648 if (skb->mac_header == skb->tail &&
2649 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2650 NAPI_GRO_CB(skb)->frag0 =
2651 page_address(skb_shinfo(skb)->frags[0].page) +
2652 skb_shinfo(skb)->frags[0].page_offset;
2653 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2656 EXPORT_SYMBOL(skb_gro_reset_offset);
2658 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2660 skb_gro_reset_offset(skb);
2662 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2664 EXPORT_SYMBOL(napi_gro_receive);
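/*
 * Illustrative usage sketch: a driver opts into GRO simply by calling
 * napi_gro_receive() instead of netif_receive_skb() in its ->poll() loop:
 *
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 *
 * Held or merged packets are flushed when the driver calls napi_complete(),
 * via napi_gro_flush().  The device must have NETIF_F_GRO set in
 * dev->features, otherwise dev_gro_receive() above falls straight through
 * to the normal path.
 */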
2666 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2668 __skb_pull(skb, skb_headlen(skb));
2669 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2671 napi->skb = skb;
2673 EXPORT_SYMBOL(napi_reuse_skb);
2675 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2677 struct sk_buff *skb = napi->skb;
2679 if (!skb) {
2680 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2681 if (skb)
2682 napi->skb = skb;
2684 return skb;
2686 EXPORT_SYMBOL(napi_get_frags);
2688 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2689 gro_result_t ret)
2691 switch (ret) {
2692 case GRO_NORMAL:
2693 case GRO_HELD:
2694 skb->protocol = eth_type_trans(skb, napi->dev);
2696 if (ret == GRO_HELD)
2697 skb_gro_pull(skb, -ETH_HLEN);
2698 else if (netif_receive_skb(skb))
2699 ret = GRO_DROP;
2700 break;
2702 case GRO_DROP:
2703 case GRO_MERGED_FREE:
2704 napi_reuse_skb(napi, skb);
2705 break;
2707 case GRO_MERGED:
2708 break;
2711 return ret;
2713 EXPORT_SYMBOL(napi_frags_finish);
2715 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2717 struct sk_buff *skb = napi->skb;
2718 struct ethhdr *eth;
2719 unsigned int hlen;
2720 unsigned int off;
2722 napi->skb = NULL;
2724 skb_reset_mac_header(skb);
2725 skb_gro_reset_offset(skb);
2727 off = skb_gro_offset(skb);
2728 hlen = off + sizeof(*eth);
2729 eth = skb_gro_header_fast(skb, off);
2730 if (skb_gro_header_hard(skb, hlen)) {
2731 eth = skb_gro_header_slow(skb, hlen, off);
2732 if (unlikely(!eth)) {
2733 napi_reuse_skb(napi, skb);
2734 skb = NULL;
2735 goto out;
2739 skb_gro_pull(skb, sizeof(*eth));
2742 * This works because the only protocols we care about don't require
2743 * special handling. We'll fix it up properly at the end.
2745 skb->protocol = eth->h_proto;
2747 out:
2748 return skb;
2750 EXPORT_SYMBOL(napi_frags_skb);
2752 gro_result_t napi_gro_frags(struct napi_struct *napi)
2754 struct sk_buff *skb = napi_frags_skb(napi);
2756 if (!skb)
2757 return GRO_DROP;
2759 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2761 EXPORT_SYMBOL(napi_gro_frags);
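/*
 * Illustrative usage sketch: a driver that receives directly into pages can
 * use the napi->skb recycling pair napi_get_frags()/napi_gro_frags() instead
 * of building its own skbs.  The page/offset/len values come from the
 * hypothetical driver's RX descriptor.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 *
 * napi_frags_skb()/napi_frags_finish() take care of the Ethernet header, so
 * the driver does not call eth_type_trans() on this path.
 */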
2763 static int process_backlog(struct napi_struct *napi, int quota)
2765 int work = 0;
2766 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2767 unsigned long start_time = jiffies;
2769 napi->weight = weight_p;
2770 do {
2771 struct sk_buff *skb;
2773 local_irq_disable();
2774 skb = __skb_dequeue(&queue->input_pkt_queue);
2775 if (!skb) {
2776 __napi_complete(napi);
2777 local_irq_enable();
2778 break;
2780 local_irq_enable();
2782 netif_receive_skb(skb);
2783 } while (++work < quota && jiffies == start_time);
2785 return work;
2789 * __napi_schedule - schedule for receive
2790 * @n: entry to schedule
2792 * The entry's receive function will be scheduled to run
2794 void __napi_schedule(struct napi_struct *n)
2796 unsigned long flags;
2798 local_irq_save(flags);
2799 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2800 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2801 local_irq_restore(flags);
2803 EXPORT_SYMBOL(__napi_schedule);
2805 void __napi_complete(struct napi_struct *n)
2807 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2808 BUG_ON(n->gro_list);
2810 list_del(&n->poll_list);
2811 smp_mb__before_clear_bit();
2812 clear_bit(NAPI_STATE_SCHED, &n->state);
2814 EXPORT_SYMBOL(__napi_complete);
2816 void napi_complete(struct napi_struct *n)
2818 unsigned long flags;
2821 * don't let napi dequeue from the cpu poll list
2822 * just in case it's running on a different cpu
2824 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2825 return;
2827 napi_gro_flush(n);
2828 local_irq_save(flags);
2829 __napi_complete(n);
2830 local_irq_restore(flags);
2832 EXPORT_SYMBOL(napi_complete);
2834 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2835 int (*poll)(struct napi_struct *, int), int weight)
2837 INIT_LIST_HEAD(&napi->poll_list);
2838 napi->gro_count = 0;
2839 napi->gro_list = NULL;
2840 napi->skb = NULL;
2841 napi->poll = poll;
2842 napi->weight = weight;
2843 list_add(&napi->dev_list, &dev->napi_list);
2844 napi->dev = dev;
2845 #ifdef CONFIG_NETPOLL
2846 spin_lock_init(&napi->poll_lock);
2847 napi->poll_owner = -1;
2848 #endif
2849 set_bit(NAPI_STATE_SCHED, &napi->state);
2851 EXPORT_SYMBOL(netif_napi_add);
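/*
 * Illustrative usage sketch: a driver registers one NAPI instance per device
 * (or per RX queue) at probe time and schedules it from its RX interrupt.
 * Names prefixed with "example_" and the priv layout are hypothetical.
 *
 *	netif_napi_add(dev, &priv->napi, example_poll, 64);
 *
 * and in the RX interrupt handler:
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		example_disable_rx_irq(dev);
 *		__napi_schedule(&priv->napi);
 *	}
 *
 * napi_enable()/napi_disable() bracket the open/stop paths, and
 * netif_napi_del() below is called before free_netdev().
 */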
2853 void netif_napi_del(struct napi_struct *napi)
2855 struct sk_buff *skb, *next;
2857 list_del_init(&napi->dev_list);
2858 napi_free_frags(napi);
2860 for (skb = napi->gro_list; skb; skb = next) {
2861 next = skb->next;
2862 skb->next = NULL;
2863 kfree_skb(skb);
2866 napi->gro_list = NULL;
2867 napi->gro_count = 0;
2869 EXPORT_SYMBOL(netif_napi_del);
2872 static void net_rx_action(struct softirq_action *h)
2874 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2875 unsigned long time_limit = jiffies + 2;
2876 int budget = netdev_budget;
2877 void *have;
2879 local_irq_disable();
2881 while (!list_empty(list)) {
2882 struct napi_struct *n;
2883 int work, weight;
2885 /* If the softirq window is exhausted then punt.
2886 * Allow this to run for 2 jiffies, which allows
2887 * an average latency of 1.5/HZ.
2889 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2890 goto softnet_break;
2892 local_irq_enable();
2894 /* Even though interrupts have been re-enabled, this
2895 * access is safe because interrupts can only add new
2896 * entries to the tail of this list, and only ->poll()
2897 * calls can remove this head entry from the list.
2899 n = list_entry(list->next, struct napi_struct, poll_list);
2901 have = netpoll_poll_lock(n);
2903 weight = n->weight;
2905 /* This NAPI_STATE_SCHED test is for avoiding a race
2906 * with netpoll's poll_napi(). Only the entity which
2907 * obtains the lock and sees NAPI_STATE_SCHED set will
2908 * actually make the ->poll() call. Therefore we avoid
2909 * accidentally calling ->poll() when NAPI is not scheduled.
2911 work = 0;
2912 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2913 work = n->poll(n, weight);
2914 trace_napi_poll(n);
2917 WARN_ON_ONCE(work > weight);
2919 budget -= work;
2921 local_irq_disable();
2923 /* Drivers must not modify the NAPI state if they
2924 * consume the entire weight. In such cases this code
2925 * still "owns" the NAPI instance and therefore can
2926 * move the instance around on the list at-will.
2928 if (unlikely(work == weight)) {
2929 if (unlikely(napi_disable_pending(n))) {
2930 local_irq_enable();
2931 napi_complete(n);
2932 local_irq_disable();
2933 } else
2934 list_move_tail(&n->poll_list, list);
2937 netpoll_poll_unlock(have);
2939 out:
2940 local_irq_enable();
2942 #ifdef CONFIG_NET_DMA
2944 * There may not be any more sk_buffs coming right now, so push
2945 * any pending DMA copies to hardware
2947 dma_issue_pending_all();
2948 #endif
2950 return;
2952 softnet_break:
2953 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2954 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2955 goto out;
2958 static gifconf_func_t *gifconf_list[NPROTO];
2961 * register_gifconf - register a SIOCGIF handler
2962 * @family: Address family
2963 * @gifconf: Function handler
2965 * Register protocol dependent address dumping routines. The handler
2966 * that is passed must not be freed or reused until it has been replaced
2967 * by another handler.
2969 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2971 if (family >= NPROTO)
2972 return -EINVAL;
2973 gifconf_list[family] = gifconf;
2974 return 0;
2976 EXPORT_SYMBOL(register_gifconf);
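/*
 * Illustrative usage sketch: an address family registers its SIOCGIFCONF
 * helper once at init time; IPv4, for instance, does
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * where the handler writes one struct ifreq per address into the supplied
 * buffer and returns the number of bytes used (or the size needed when
 * called with a NULL buffer, as dev_ifconf() below does on its probe pass).
 */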
2980 * Map an interface index to its name (SIOCGIFNAME)
2984 * We need this ioctl for efficient implementation of the
2985 * if_indextoname() function required by the IPv6 API. Without
2986 * it, we would have to search all the interfaces to find a
2987 * match. --pb
2990 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2992 struct net_device *dev;
2993 struct ifreq ifr;
2996 * Fetch the caller's info block.
2999 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3000 return -EFAULT;
3002 rcu_read_lock();
3003 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3004 if (!dev) {
3005 rcu_read_unlock();
3006 return -ENODEV;
3009 strcpy(ifr.ifr_name, dev->name);
3010 rcu_read_unlock();
3012 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3013 return -EFAULT;
3014 return 0;
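/*
 * Illustrative userspace counterpart: if_indextoname() is essentially a
 * SIOCGIFNAME ioctl on any socket with ifr_ifindex filled in
 * (needs <sys/ioctl.h> and <net/if.h>):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = index;
 *	if (fd >= 0 && ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%d -> %s\n", index, ifr.ifr_name);
 */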
3018 * Perform a SIOCGIFCONF call. This structure will change
3019 * size eventually, and there is nothing I can do about it.
3020 * Thus we will need a 'compatibility mode'.
3023 static int dev_ifconf(struct net *net, char __user *arg)
3025 struct ifconf ifc;
3026 struct net_device *dev;
3027 char __user *pos;
3028 int len;
3029 int total;
3030 int i;
3033 * Fetch the caller's info block.
3036 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3037 return -EFAULT;
3039 pos = ifc.ifc_buf;
3040 len = ifc.ifc_len;
3043 * Loop over the interfaces, and write an info block for each.
3046 total = 0;
3047 for_each_netdev(net, dev) {
3048 for (i = 0; i < NPROTO; i++) {
3049 if (gifconf_list[i]) {
3050 int done;
3051 if (!pos)
3052 done = gifconf_list[i](dev, NULL, 0);
3053 else
3054 done = gifconf_list[i](dev, pos + total,
3055 len - total);
3056 if (done < 0)
3057 return -EFAULT;
3058 total += done;
3064 * All done. Write the updated control block back to the caller.
3066 ifc.ifc_len = total;
3069 * Both BSD and Solaris return 0 here, so we do too.
3071 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
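/*
 * Illustrative userspace counterpart: the classic two-pass SIOCGIFCONF
 * pattern this handler serves - probe for the required size with a NULL
 * buffer, then fetch the list (error handling omitted):
 *
 *	struct ifconf ifc;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int i;
 *
 *	ifc.ifc_buf = NULL;
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// ifc.ifc_len = size needed
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(fd, SIOCGIFCONF, &ifc);		// fills the ifc_req array
 *	for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *		puts(ifc.ifc_req[i].ifr_name);
 */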
3074 #ifdef CONFIG_PROC_FS
3076 * This is invoked by the /proc filesystem handler to display a device
3077 * in detail.
3079 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3080 __acquires(RCU)
3082 struct net *net = seq_file_net(seq);
3083 loff_t off;
3084 struct net_device *dev;
3086 rcu_read_lock();
3087 if (!*pos)
3088 return SEQ_START_TOKEN;
3090 off = 1;
3091 for_each_netdev_rcu(net, dev)
3092 if (off++ == *pos)
3093 return dev;
3095 return NULL;
3098 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3100 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3101 first_net_device(seq_file_net(seq)) :
3102 next_net_device((struct net_device *)v);
3104 ++*pos;
3105 return rcu_dereference(dev);
3108 void dev_seq_stop(struct seq_file *seq, void *v)
3109 __releases(RCU)
3111 rcu_read_unlock();
3114 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3116 const struct net_device_stats *stats = dev_get_stats(dev);
3118 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3119 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3120 dev->name, stats->rx_bytes, stats->rx_packets,
3121 stats->rx_errors,
3122 stats->rx_dropped + stats->rx_missed_errors,
3123 stats->rx_fifo_errors,
3124 stats->rx_length_errors + stats->rx_over_errors +
3125 stats->rx_crc_errors + stats->rx_frame_errors,
3126 stats->rx_compressed, stats->multicast,
3127 stats->tx_bytes, stats->tx_packets,
3128 stats->tx_errors, stats->tx_dropped,
3129 stats->tx_fifo_errors, stats->collisions,
3130 stats->tx_carrier_errors +
3131 stats->tx_aborted_errors +
3132 stats->tx_window_errors +
3133 stats->tx_heartbeat_errors,
3134 stats->tx_compressed);
3138 * Called from the PROCfs module. This now uses the new arbitrary sized
3139 * /proc/net interface to create /proc/net/dev
3141 static int dev_seq_show(struct seq_file *seq, void *v)
3143 if (v == SEQ_START_TOKEN)
3144 seq_puts(seq, "Inter-| Receive "
3145 " | Transmit\n"
3146 " face |bytes packets errs drop fifo frame "
3147 "compressed multicast|bytes packets errs "
3148 "drop fifo colls carrier compressed\n");
3149 else
3150 dev_seq_printf_stats(seq, v);
3151 return 0;
3154 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3156 struct netif_rx_stats *rc = NULL;
3158 while (*pos < nr_cpu_ids)
3159 if (cpu_online(*pos)) {
3160 rc = &per_cpu(netdev_rx_stat, *pos);
3161 break;
3162 } else
3163 ++*pos;
3164 return rc;
3167 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3169 return softnet_get_online(pos);
3172 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3174 ++*pos;
3175 return softnet_get_online(pos);
3178 static void softnet_seq_stop(struct seq_file *seq, void *v)
3182 static int softnet_seq_show(struct seq_file *seq, void *v)
3184 struct netif_rx_stats *s = v;
3186 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3187 s->total, s->dropped, s->time_squeeze, 0,
3188 0, 0, 0, 0, /* was fastroute */
3189 s->cpu_collision);
3190 return 0;
3193 static const struct seq_operations dev_seq_ops = {
3194 .start = dev_seq_start,
3195 .next = dev_seq_next,
3196 .stop = dev_seq_stop,
3197 .show = dev_seq_show,
3200 static int dev_seq_open(struct inode *inode, struct file *file)
3202 return seq_open_net(inode, file, &dev_seq_ops,
3203 sizeof(struct seq_net_private));
3206 static const struct file_operations dev_seq_fops = {
3207 .owner = THIS_MODULE,
3208 .open = dev_seq_open,
3209 .read = seq_read,
3210 .llseek = seq_lseek,
3211 .release = seq_release_net,
3214 static const struct seq_operations softnet_seq_ops = {
3215 .start = softnet_seq_start,
3216 .next = softnet_seq_next,
3217 .stop = softnet_seq_stop,
3218 .show = softnet_seq_show,
3221 static int softnet_seq_open(struct inode *inode, struct file *file)
3223 return seq_open(file, &softnet_seq_ops);
3226 static const struct file_operations softnet_seq_fops = {
3227 .owner = THIS_MODULE,
3228 .open = softnet_seq_open,
3229 .read = seq_read,
3230 .llseek = seq_lseek,
3231 .release = seq_release,
3234 static void *ptype_get_idx(loff_t pos)
3236 struct packet_type *pt = NULL;
3237 loff_t i = 0;
3238 int t;
3240 list_for_each_entry_rcu(pt, &ptype_all, list) {
3241 if (i == pos)
3242 return pt;
3243 ++i;
3246 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3247 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3248 if (i == pos)
3249 return pt;
3250 ++i;
3253 return NULL;
3256 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3257 __acquires(RCU)
3259 rcu_read_lock();
3260 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3263 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3265 struct packet_type *pt;
3266 struct list_head *nxt;
3267 int hash;
3269 ++*pos;
3270 if (v == SEQ_START_TOKEN)
3271 return ptype_get_idx(0);
3273 pt = v;
3274 nxt = pt->list.next;
3275 if (pt->type == htons(ETH_P_ALL)) {
3276 if (nxt != &ptype_all)
3277 goto found;
3278 hash = 0;
3279 nxt = ptype_base[0].next;
3280 } else
3281 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3283 while (nxt == &ptype_base[hash]) {
3284 if (++hash >= PTYPE_HASH_SIZE)
3285 return NULL;
3286 nxt = ptype_base[hash].next;
3288 found:
3289 return list_entry(nxt, struct packet_type, list);
3292 static void ptype_seq_stop(struct seq_file *seq, void *v)
3293 __releases(RCU)
3295 rcu_read_unlock();
3298 static int ptype_seq_show(struct seq_file *seq, void *v)
3300 struct packet_type *pt = v;
3302 if (v == SEQ_START_TOKEN)
3303 seq_puts(seq, "Type Device Function\n");
3304 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3305 if (pt->type == htons(ETH_P_ALL))
3306 seq_puts(seq, "ALL ");
3307 else
3308 seq_printf(seq, "%04x", ntohs(pt->type));
3310 seq_printf(seq, " %-8s %pF\n",
3311 pt->dev ? pt->dev->name : "", pt->func);
3314 return 0;
3317 static const struct seq_operations ptype_seq_ops = {
3318 .start = ptype_seq_start,
3319 .next = ptype_seq_next,
3320 .stop = ptype_seq_stop,
3321 .show = ptype_seq_show,
3324 static int ptype_seq_open(struct inode *inode, struct file *file)
3326 return seq_open_net(inode, file, &ptype_seq_ops,
3327 sizeof(struct seq_net_private));
3330 static const struct file_operations ptype_seq_fops = {
3331 .owner = THIS_MODULE,
3332 .open = ptype_seq_open,
3333 .read = seq_read,
3334 .llseek = seq_lseek,
3335 .release = seq_release_net,
3339 static int __net_init dev_proc_net_init(struct net *net)
3341 int rc = -ENOMEM;
3343 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3344 goto out;
3345 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3346 goto out_dev;
3347 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3348 goto out_softnet;
3350 if (wext_proc_init(net))
3351 goto out_ptype;
3352 rc = 0;
3353 out:
3354 return rc;
3355 out_ptype:
3356 proc_net_remove(net, "ptype");
3357 out_softnet:
3358 proc_net_remove(net, "softnet_stat");
3359 out_dev:
3360 proc_net_remove(net, "dev");
3361 goto out;
3364 static void __net_exit dev_proc_net_exit(struct net *net)
3366 wext_proc_exit(net);
3368 proc_net_remove(net, "ptype");
3369 proc_net_remove(net, "softnet_stat");
3370 proc_net_remove(net, "dev");
3373 static struct pernet_operations __net_initdata dev_proc_ops = {
3374 .init = dev_proc_net_init,
3375 .exit = dev_proc_net_exit,
3378 static int __init dev_proc_init(void)
3380 return register_pernet_subsys(&dev_proc_ops);
3382 #else
3383 #define dev_proc_init() 0
3384 #endif /* CONFIG_PROC_FS */
3388 * netdev_set_master - set up master/slave pair
3389 * @slave: slave device
3390 * @master: new master device
3392 * Changes the master device of the slave. Pass %NULL to break the
3393 * bonding. The caller must hold the RTNL semaphore. On a failure
3394 * a negative errno code is returned. On success the reference counts
3395 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3396 * function returns zero.
3398 int netdev_set_master(struct net_device *slave, struct net_device *master)
3400 struct net_device *old = slave->master;
3402 ASSERT_RTNL();
3404 if (master) {
3405 if (old)
3406 return -EBUSY;
3407 dev_hold(master);
3410 slave->master = master;
3412 synchronize_net();
3414 if (old)
3415 dev_put(old);
3417 if (master)
3418 slave->flags |= IFF_SLAVE;
3419 else
3420 slave->flags &= ~IFF_SLAVE;
3422 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3423 return 0;
3425 EXPORT_SYMBOL(netdev_set_master);
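/*
 * Illustrative usage sketch: this is the pattern the bonding driver follows
 * when enslaving a device, all under rtnl_lock():
 *
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	if (err)
 *		goto err_restore;
 *	if (bond_dev->flags & IFF_PROMISC)
 *		dev_set_promiscuity(slave_dev, 1);
 *
 * Passing a NULL master on the release path breaks the pairing again.
 */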
3427 static void dev_change_rx_flags(struct net_device *dev, int flags)
3429 const struct net_device_ops *ops = dev->netdev_ops;
3431 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3432 ops->ndo_change_rx_flags(dev, flags);
3435 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3437 unsigned short old_flags = dev->flags;
3438 uid_t uid;
3439 gid_t gid;
3441 ASSERT_RTNL();
3443 dev->flags |= IFF_PROMISC;
3444 dev->promiscuity += inc;
3445 if (dev->promiscuity == 0) {
3447 * Avoid overflow.
3448 * If inc causes overflow, untouch promisc and return error.
3450 if (inc < 0)
3451 dev->flags &= ~IFF_PROMISC;
3452 else {
3453 dev->promiscuity -= inc;
3454 printk(KERN_WARNING "%s: promiscuity touches roof, "
3455 "set promiscuity failed, promiscuity feature "
3456 "of device might be broken.\n", dev->name);
3457 return -EOVERFLOW;
3460 if (dev->flags != old_flags) {
3461 printk(KERN_INFO "device %s %s promiscuous mode\n",
3462 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3463 "left");
3464 if (audit_enabled) {
3465 current_uid_gid(&uid, &gid);
3466 audit_log(current->audit_context, GFP_ATOMIC,
3467 AUDIT_ANOM_PROMISCUOUS,
3468 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3469 dev->name, (dev->flags & IFF_PROMISC),
3470 (old_flags & IFF_PROMISC),
3471 audit_get_loginuid(current),
3472 uid, gid,
3473 audit_get_sessionid(current));
3476 dev_change_rx_flags(dev, IFF_PROMISC);
3478 return 0;
3482 * dev_set_promiscuity - update promiscuity count on a device
3483 * @dev: device
3484 * @inc: modifier
3486 * Add or remove promiscuity from a device. While the count in the device
3487 * remains above zero the interface remains promiscuous. Once it hits zero
3488 * the device reverts back to normal filtering operation. A negative inc
3489 * value is used to drop promiscuity on the device.
3490 * Return 0 if successful or a negative errno code on error.
3492 int dev_set_promiscuity(struct net_device *dev, int inc)
3494 unsigned short old_flags = dev->flags;
3495 int err;
3497 err = __dev_set_promiscuity(dev, inc);
3498 if (err < 0)
3499 return err;
3500 if (dev->flags != old_flags)
3501 dev_set_rx_mode(dev);
3502 return err;
3504 EXPORT_SYMBOL(dev_set_promiscuity);
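/*
 * Illustrative usage sketch: packet sockets and bridging use this counter
 * interface so that several users can request promiscuous mode
 * independently; each +1 must eventually be paired with a -1, and the RTNL
 * must be held as ASSERT_RTNL() above enforces:
 *
 *	dev_set_promiscuity(dev, 1);	// while we need to see all frames
 *	...
 *	dev_set_promiscuity(dev, -1);	// drop our reference again
 */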
3507 * dev_set_allmulti - update allmulti count on a device
3508 * @dev: device
3509 * @inc: modifier
3511 * Add or remove reception of all multicast frames to a device. While the
3512 * count in the device remains above zero the interface keeps receiving
3513 * all multicast frames. Once it hits zero the device reverts to normal
3514 * filtering operation. A negative @inc value is used to drop the counter
3515 * when releasing a resource needing all multicasts.
3516 * Return 0 if successful or a negative errno code on error.
3519 int dev_set_allmulti(struct net_device *dev, int inc)
3521 unsigned short old_flags = dev->flags;
3523 ASSERT_RTNL();
3525 dev->flags |= IFF_ALLMULTI;
3526 dev->allmulti += inc;
3527 if (dev->allmulti == 0) {
3529 * Avoid overflow.
3530 * If inc causes overflow, untouch allmulti and return error.
3532 if (inc < 0)
3533 dev->flags &= ~IFF_ALLMULTI;
3534 else {
3535 dev->allmulti -= inc;
3536 printk(KERN_WARNING "%s: allmulti touches roof, "
3537 "set allmulti failed, allmulti feature of "
3538 "device might be broken.\n", dev->name);
3539 return -EOVERFLOW;
3542 if (dev->flags ^ old_flags) {
3543 dev_change_rx_flags(dev, IFF_ALLMULTI);
3544 dev_set_rx_mode(dev);
3546 return 0;
3548 EXPORT_SYMBOL(dev_set_allmulti);
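/*
 * Illustrative usage sketch: stacked devices such as VLAN or bonding
 * propagate their allmulti state to the lower device the same way as
 * promiscuity above, again with the RTNL held:
 *
 *	dev_set_allmulti(lower_dev, 1);		// while all-multicast is needed
 *	...
 *	dev_set_allmulti(lower_dev, -1);	// release it again
 */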
3551 * Upload unicast and multicast address lists to device and
3552 * configure RX filtering. When the device doesn't support unicast
3553 * filtering it is put in promiscuous mode while unicast addresses
3554 * are present.
3556 void __dev_set_rx_mode(struct net_device *dev)
3558 const struct net_device_ops *ops = dev->netdev_ops;
3560 /* dev_open will call this function so the list will stay sane. */
3561 if (!(dev->flags&IFF_UP))
3562 return;
3564 if (!netif_device_present(dev))
3565 return;
3567 if (ops->ndo_set_rx_mode)
3568 ops->ndo_set_rx_mode(dev);
3569 else {
3570 /* Unicast address changes may only happen under the rtnl,
3571 * therefore calling __dev_set_promiscuity here is safe.
3573 if (dev->uc.count > 0 && !dev->uc_promisc) {
3574 __dev_set_promiscuity(dev, 1);
3575 dev->uc_promisc = 1;
3576 } else if (dev->uc.count == 0 && dev->uc_promisc) {
3577 __dev_set_promiscuity(dev, -1);
3578 dev->uc_promisc = 0;
3581 if (ops->ndo_set_multicast_list)
3582 ops->ndo_set_multicast_list(dev);
3586 void dev_set_rx_mode(struct net_device *dev)
3588 netif_addr_lock_bh(dev);
3589 __dev_set_rx_mode(dev);
3590 netif_addr_unlock_bh(dev);
3593 /* hw addresses list handling functions */
3595 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3596 int addr_len, unsigned char addr_type)
3598 struct netdev_hw_addr *ha;
3599 int alloc_size;
3601 if (addr_len > MAX_ADDR_LEN)
3602 return -EINVAL;
3604 list_for_each_entry(ha, &list->list, list) {
3605 if (!memcmp(ha->addr, addr, addr_len) &&
3606 ha->type == addr_type) {
3607 ha->refcount++;
3608 return 0;
3613 alloc_size = sizeof(*ha);
3614 if (alloc_size < L1_CACHE_BYTES)
3615 alloc_size = L1_CACHE_BYTES;
3616 ha = kmalloc(alloc_size, GFP_ATOMIC);
3617 if (!ha)
3618 return -ENOMEM;
3619 memcpy(ha->addr, addr, addr_len);
3620 ha->type = addr_type;
3621 ha->refcount = 1;
3622 ha->synced = false;
3623 list_add_tail_rcu(&ha->list, &list->list);
3624 list->count++;
3625 return 0;
3628 static void ha_rcu_free(struct rcu_head *head)
3630 struct netdev_hw_addr *ha;
3632 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3633 kfree(ha);
3636 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3637 int addr_len, unsigned char addr_type)
3639 struct netdev_hw_addr *ha;
3641 list_for_each_entry(ha, &list->list, list) {
3642 if (!memcmp(ha->addr, addr, addr_len) &&
3643 (ha->type == addr_type || !addr_type)) {
3644 if (--ha->refcount)
3645 return 0;
3646 list_del_rcu(&ha->list);
3647 call_rcu(&ha->rcu_head, ha_rcu_free);
3648 list->count--;
3649 return 0;
3652 return -ENOENT;
3655 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3656 struct netdev_hw_addr_list *from_list,
3657 int addr_len,
3658 unsigned char addr_type)
3660 int err;
3661 struct netdev_hw_addr *ha, *ha2;
3662 unsigned char type;
3664 list_for_each_entry(ha, &from_list->list, list) {
3665 type = addr_type ? addr_type : ha->type;
3666 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3667 if (err)
3668 goto unroll;
3670 return 0;
3672 unroll:
3673 list_for_each_entry(ha2, &from_list->list, list) {
3674 if (ha2 == ha)
3675 break;
3676 type = addr_type ? addr_type : ha2->type;
3677 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3679 return err;
3682 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3683 struct netdev_hw_addr_list *from_list,
3684 int addr_len,
3685 unsigned char addr_type)
3687 struct netdev_hw_addr *ha;
3688 unsigned char type;
3690 list_for_each_entry(ha, &from_list->list, list) {
3691 type = addr_type ? addr_type : ha->type;
3692 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
3696 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3697 struct netdev_hw_addr_list *from_list,
3698 int addr_len)
3700 int err = 0;
3701 struct netdev_hw_addr *ha, *tmp;
3703 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3704 if (!ha->synced) {
3705 err = __hw_addr_add(to_list, ha->addr,
3706 addr_len, ha->type);
3707 if (err)
3708 break;
3709 ha->synced = true;
3710 ha->refcount++;
3711 } else if (ha->refcount == 1) {
3712 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3713 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3716 return err;
3719 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3720 struct netdev_hw_addr_list *from_list,
3721 int addr_len)
3723 struct netdev_hw_addr *ha, *tmp;
3725 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3726 if (ha->synced) {
3727 __hw_addr_del(to_list, ha->addr,
3728 addr_len, ha->type);
3729 ha->synced = false;
3730 __hw_addr_del(from_list, ha->addr,
3731 addr_len, ha->type);
3736 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3738 struct netdev_hw_addr *ha, *tmp;
3740 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3741 list_del_rcu(&ha->list);
3742 call_rcu(&ha->rcu_head, ha_rcu_free);
3744 list->count = 0;
3747 static void __hw_addr_init(struct netdev_hw_addr_list *list)
3749 INIT_LIST_HEAD(&list->list);
3750 list->count = 0;
3753 /* Device addresses handling functions */
3755 static void dev_addr_flush(struct net_device *dev)
3757 /* rtnl_mutex must be held here */
3759 __hw_addr_flush(&dev->dev_addrs);
3760 dev->dev_addr = NULL;
3763 static int dev_addr_init(struct net_device *dev)
3765 unsigned char addr[MAX_ADDR_LEN];
3766 struct netdev_hw_addr *ha;
3767 int err;
3769 /* rtnl_mutex must be held here */
3771 __hw_addr_init(&dev->dev_addrs);
3772 memset(addr, 0, sizeof(addr));
3773 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3774 NETDEV_HW_ADDR_T_LAN);
3775 if (!err) {
3777 * Get the first (previously created) address from the list
3778 * and set dev_addr pointer to this location.
3780 ha = list_first_entry(&dev->dev_addrs.list,
3781 struct netdev_hw_addr, list);
3782 dev->dev_addr = ha->addr;
3784 return err;
3788 * dev_addr_add - Add a device address
3789 * @dev: device
3790 * @addr: address to add
3791 * @addr_type: address type
3793 * Add a device address to the device or increase the reference count if
3794 * it already exists.
3796 * The caller must hold the rtnl_mutex.
3798 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3799 unsigned char addr_type)
3801 int err;
3803 ASSERT_RTNL();
3805 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3806 if (!err)
3807 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3808 return err;
3810 EXPORT_SYMBOL(dev_addr_add);
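/*
 * Illustrative usage sketch: a driver that wants the stack to know about an
 * additional hardware address (for example a SAN MAC on an FCoE-capable
 * NIC) adds it under the RTNL and removes it symmetrically; "san_mac" is a
 * hypothetical 6-byte buffer:
 *
 *	err = dev_addr_add(dev, san_mac, NETDEV_HW_ADDR_T_SAN);
 *	...
 *	dev_addr_del(dev, san_mac, NETDEV_HW_ADDR_T_SAN);
 */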
3813 * dev_addr_del - Release a device address.
3814 * @dev: device
3815 * @addr: address to delete
3816 * @addr_type: address type
3818 * Release reference to a device address and remove it from the device
3819 * if the reference count drops to zero.
3821 * The caller must hold the rtnl_mutex.
3823 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3824 unsigned char addr_type)
3826 int err;
3827 struct netdev_hw_addr *ha;
3829 ASSERT_RTNL();
3832 * We cannot remove the first address from the list because
3833 * dev->dev_addr points to that.
3835 ha = list_first_entry(&dev->dev_addrs.list,
3836 struct netdev_hw_addr, list);
3837 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3838 return -ENOENT;
3840 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3841 addr_type);
3842 if (!err)
3843 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3844 return err;
3846 EXPORT_SYMBOL(dev_addr_del);
3849 * dev_addr_add_multiple - Add device addresses from another device
3850 * @to_dev: device to which addresses will be added
3851 * @from_dev: device from which addresses will be added
3852 * @addr_type: address type - 0 means type will be used from from_dev
3854 * Add the device addresses of one device to another.
3856 * The caller must hold the rtnl_mutex.
3858 int dev_addr_add_multiple(struct net_device *to_dev,
3859 struct net_device *from_dev,
3860 unsigned char addr_type)
3862 int err;
3864 ASSERT_RTNL();
3866 if (from_dev->addr_len != to_dev->addr_len)
3867 return -EINVAL;
3868 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3869 to_dev->addr_len, addr_type);
3870 if (!err)
3871 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3872 return err;
3874 EXPORT_SYMBOL(dev_addr_add_multiple);
3877 * dev_addr_del_multiple - Delete device addresses by another device
3878 * @to_dev: device where the addresses will be deleted
3879 * @from_dev: device whose address list selects the addresses to delete
3880 * @addr_type: address type - 0 means type will be used from from_dev
3882 * Deletes from the to device the addresses listed in the from device.
3884 * The caller must hold the rtnl_mutex.
3886 int dev_addr_del_multiple(struct net_device *to_dev,
3887 struct net_device *from_dev,
3888 unsigned char addr_type)
3890 ASSERT_RTNL();
3892 if (from_dev->addr_len != to_dev->addr_len)
3893 return -EINVAL;
3894 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
3895 to_dev->addr_len, addr_type);
3896 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3897 return 0;
3899 EXPORT_SYMBOL(dev_addr_del_multiple);
3901 /* multicast addresses handling functions */
3903 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3904 void *addr, int alen, int glbl)
3906 struct dev_addr_list *da;
3908 for (; (da = *list) != NULL; list = &da->next) {
3909 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3910 alen == da->da_addrlen) {
3911 if (glbl) {
3912 int old_glbl = da->da_gusers;
3913 da->da_gusers = 0;
3914 if (old_glbl == 0)
3915 break;
3917 if (--da->da_users)
3918 return 0;
3920 *list = da->next;
3921 kfree(da);
3922 (*count)--;
3923 return 0;
3926 return -ENOENT;
3929 int __dev_addr_add(struct dev_addr_list **list, int *count,
3930 void *addr, int alen, int glbl)
3932 struct dev_addr_list *da;
3934 for (da = *list; da != NULL; da = da->next) {
3935 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3936 da->da_addrlen == alen) {
3937 if (glbl) {
3938 int old_glbl = da->da_gusers;
3939 da->da_gusers = 1;
3940 if (old_glbl)
3941 return 0;
3943 da->da_users++;
3944 return 0;
3948 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3949 if (da == NULL)
3950 return -ENOMEM;
3951 memcpy(da->da_addr, addr, alen);
3952 da->da_addrlen = alen;
3953 da->da_users = 1;
3954 da->da_gusers = glbl ? 1 : 0;
3955 da->next = *list;
3956 *list = da;
3957 (*count)++;
3958 return 0;
3962 * dev_unicast_delete - Release secondary unicast address.
3963 * @dev: device
3964 * @addr: address to delete
3966 * Release reference to a secondary unicast address and remove it
3967 * from the device if the reference count drops to zero.
3969 * The caller must hold the rtnl_mutex.
3971 int dev_unicast_delete(struct net_device *dev, void *addr)
3973 int err;
3975 ASSERT_RTNL();
3977 netif_addr_lock_bh(dev);
3978 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3979 NETDEV_HW_ADDR_T_UNICAST);
3980 if (!err)
3981 __dev_set_rx_mode(dev);
3982 netif_addr_unlock_bh(dev);
3983 return err;
3985 EXPORT_SYMBOL(dev_unicast_delete);
3988 * dev_unicast_add - add a secondary unicast address
3989 * @dev: device
3990 * @addr: address to add
3992 * Add a secondary unicast address to the device or increase
3993 * the reference count if it already exists.
3995 * The caller must hold the rtnl_mutex.
3997 int dev_unicast_add(struct net_device *dev, void *addr)
3999 int err;
4001 ASSERT_RTNL();
4003 netif_addr_lock_bh(dev);
4004 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4005 NETDEV_HW_ADDR_T_UNICAST);
4006 if (!err)
4007 __dev_set_rx_mode(dev);
4008 netif_addr_unlock_bh(dev);
4009 return err;
4011 EXPORT_SYMBOL(dev_unicast_add);
4013 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4014 struct dev_addr_list **from, int *from_count)
4016 struct dev_addr_list *da, *next;
4017 int err = 0;
4019 da = *from;
4020 while (da != NULL) {
4021 next = da->next;
4022 if (!da->da_synced) {
4023 err = __dev_addr_add(to, to_count,
4024 da->da_addr, da->da_addrlen, 0);
4025 if (err < 0)
4026 break;
4027 da->da_synced = 1;
4028 da->da_users++;
4029 } else if (da->da_users == 1) {
4030 __dev_addr_delete(to, to_count,
4031 da->da_addr, da->da_addrlen, 0);
4032 __dev_addr_delete(from, from_count,
4033 da->da_addr, da->da_addrlen, 0);
4035 da = next;
4037 return err;
4039 EXPORT_SYMBOL_GPL(__dev_addr_sync);
4041 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4042 struct dev_addr_list **from, int *from_count)
4044 struct dev_addr_list *da, *next;
4046 da = *from;
4047 while (da != NULL) {
4048 next = da->next;
4049 if (da->da_synced) {
4050 __dev_addr_delete(to, to_count,
4051 da->da_addr, da->da_addrlen, 0);
4052 da->da_synced = 0;
4053 __dev_addr_delete(from, from_count,
4054 da->da_addr, da->da_addrlen, 0);
4056 da = next;
4059 EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4062 * dev_unicast_sync - Synchronize device's unicast list to another device
4063 * @to: destination device
4064 * @from: source device
4066 * Add newly added addresses to the destination device and release
4067 * addresses that have no users left. The source device must be
4068 * locked by netif_tx_lock_bh.
4070 * This function is intended to be called from the dev->set_rx_mode
4071 * function of layered software devices.
4073 int dev_unicast_sync(struct net_device *to, struct net_device *from)
4075 int err = 0;
4077 if (to->addr_len != from->addr_len)
4078 return -EINVAL;
4080 netif_addr_lock_bh(to);
4081 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4082 if (!err)
4083 __dev_set_rx_mode(to);
4084 netif_addr_unlock_bh(to);
4085 return err;
4087 EXPORT_SYMBOL(dev_unicast_sync);
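/*
 * Illustrative usage sketch: a stacked device (VLAN, macvlan, ...) keeps the
 * lower device's unicast filter in sync from its rx_mode callback and takes
 * the addresses back out on stop; example_get_lower() is hypothetical.
 *
 *	static void example_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *lower = example_get_lower(dev);
 *
 *		dev_unicast_sync(lower, dev);
 *	}
 *
 * with the matching dev_unicast_unsync(lower, dev) in the ->ndo_stop() path.
 */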
4090 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4091 * @to: destination device
4092 * @from: source device
4094 * Remove all addresses that were added to the destination device by
4095 * dev_unicast_sync(). This function is intended to be called from the
4096 * dev->stop function of layered software devices.
4098 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4100 if (to->addr_len != from->addr_len)
4101 return;
4103 netif_addr_lock_bh(from);
4104 netif_addr_lock(to);
4105 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4106 __dev_set_rx_mode(to);
4107 netif_addr_unlock(to);
4108 netif_addr_unlock_bh(from);
4110 EXPORT_SYMBOL(dev_unicast_unsync);
4112 static void dev_unicast_flush(struct net_device *dev)
4114 netif_addr_lock_bh(dev);
4115 __hw_addr_flush(&dev->uc);
4116 netif_addr_unlock_bh(dev);
4119 static void dev_unicast_init(struct net_device *dev)
4121 __hw_addr_init(&dev->uc);
4125 static void __dev_addr_discard(struct dev_addr_list **list)
4127 struct dev_addr_list *tmp;
4129 while (*list != NULL) {
4130 tmp = *list;
4131 *list = tmp->next;
4132 if (tmp->da_users > tmp->da_gusers)
4133 printk("__dev_addr_discard: address leakage! "
4134 "da_users=%d\n", tmp->da_users);
4135 kfree(tmp);
4139 static void dev_addr_discard(struct net_device *dev)
4141 netif_addr_lock_bh(dev);
4143 __dev_addr_discard(&dev->mc_list);
4144 dev->mc_count = 0;
4146 netif_addr_unlock_bh(dev);
4150 * dev_get_flags - get flags reported to userspace
4151 * @dev: device
4153 * Get the combination of flag bits exported through APIs to userspace.
4155 unsigned dev_get_flags(const struct net_device *dev)
4157 unsigned flags;
4159 flags = (dev->flags & ~(IFF_PROMISC |
4160 IFF_ALLMULTI |
4161 IFF_RUNNING |
4162 IFF_LOWER_UP |
4163 IFF_DORMANT)) |
4164 (dev->gflags & (IFF_PROMISC |
4165 IFF_ALLMULTI));
4167 if (netif_running(dev)) {
4168 if (netif_oper_up(dev))
4169 flags |= IFF_RUNNING;
4170 if (netif_carrier_ok(dev))
4171 flags |= IFF_LOWER_UP;
4172 if (netif_dormant(dev))
4173 flags |= IFF_DORMANT;
4176 return flags;
4178 EXPORT_SYMBOL(dev_get_flags);
4181 * dev_change_flags - change device settings
4182 * @dev: device
4183 * @flags: device state flags
4185 * Change settings on device based state flags. The flags are
4186 * in the userspace exported format.
4188 int dev_change_flags(struct net_device *dev, unsigned flags)
4190 int ret, changes;
4191 int old_flags = dev->flags;
4193 ASSERT_RTNL();
4196 * Set the flags on our device.
4199 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4200 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4201 IFF_AUTOMEDIA)) |
4202 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4203 IFF_ALLMULTI));
4206 * Load in the correct multicast list now the flags have changed.
4209 if ((old_flags ^ flags) & IFF_MULTICAST)
4210 dev_change_rx_flags(dev, IFF_MULTICAST);
4212 dev_set_rx_mode(dev);
4215 * Have we downed the interface? We handle IFF_UP ourselves
4216 * according to user attempts to set it, rather than blindly
4217 * setting it.
4220 ret = 0;
4221 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4222 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4224 if (!ret)
4225 dev_set_rx_mode(dev);
4228 if (dev->flags & IFF_UP &&
4229 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4230 IFF_VOLATILE)))
4231 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4233 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4234 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4236 dev->gflags ^= IFF_PROMISC;
4237 dev_set_promiscuity(dev, inc);
4240 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4241 is important. Some (broken) drivers set IFF_PROMISC when
4242 IFF_ALLMULTI is requested, without asking us and without reporting it.
4244 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4245 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4247 dev->gflags ^= IFF_ALLMULTI;
4248 dev_set_allmulti(dev, inc);
4251 /* Exclude state transition flags, already notified */
4252 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4253 if (changes)
4254 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4256 return ret;
4258 EXPORT_SYMBOL(dev_change_flags);
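/*
 * Illustrative usage sketch: the in-kernel equivalent of "ifconfig eth0 up"
 * is a read-modify-write of the userspace-format flags under the RTNL, the
 * same thing SIOCSIFFLAGS in dev_ifsioc() below does on behalf of userspace:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */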
4261 * dev_set_mtu - Change maximum transfer unit
4262 * @dev: device
4263 * @new_mtu: new transfer unit
4265 * Change the maximum transfer size of the network device.
4267 int dev_set_mtu(struct net_device *dev, int new_mtu)
4269 const struct net_device_ops *ops = dev->netdev_ops;
4270 int err;
4272 if (new_mtu == dev->mtu)
4273 return 0;
4275 /* MTU must be positive. */
4276 if (new_mtu < 0)
4277 return -EINVAL;
4279 if (!netif_device_present(dev))
4280 return -ENODEV;
4282 err = 0;
4283 if (ops->ndo_change_mtu)
4284 err = ops->ndo_change_mtu(dev, new_mtu);
4285 else
4286 dev->mtu = new_mtu;
4288 if (!err && dev->flags & IFF_UP)
4289 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4290 return err;
4292 EXPORT_SYMBOL(dev_set_mtu);
4295 * dev_set_mac_address - Change Media Access Control Address
4296 * @dev: device
4297 * @sa: new address
4299 * Change the hardware (MAC) address of the device
4301 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4303 const struct net_device_ops *ops = dev->netdev_ops;
4304 int err;
4306 if (!ops->ndo_set_mac_address)
4307 return -EOPNOTSUPP;
4308 if (sa->sa_family != dev->type)
4309 return -EINVAL;
4310 if (!netif_device_present(dev))
4311 return -ENODEV;
4312 err = ops->ndo_set_mac_address(dev, sa);
4313 if (!err)
4314 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4315 return err;
4317 EXPORT_SYMBOL(dev_set_mac_address);
4320 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4322 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4324 int err;
4325 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4327 if (!dev)
4328 return -ENODEV;
4330 switch (cmd) {
4331 case SIOCGIFFLAGS: /* Get interface flags */
4332 ifr->ifr_flags = (short) dev_get_flags(dev);
4333 return 0;
4335 case SIOCGIFMETRIC: /* Get the metric on the interface
4336 (currently unused) */
4337 ifr->ifr_metric = 0;
4338 return 0;
4340 case SIOCGIFMTU: /* Get the MTU of a device */
4341 ifr->ifr_mtu = dev->mtu;
4342 return 0;
4344 case SIOCGIFHWADDR:
4345 if (!dev->addr_len)
4346 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4347 else
4348 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4349 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4350 ifr->ifr_hwaddr.sa_family = dev->type;
4351 return 0;
4353 case SIOCGIFSLAVE:
4354 err = -EINVAL;
4355 break;
4357 case SIOCGIFMAP:
4358 ifr->ifr_map.mem_start = dev->mem_start;
4359 ifr->ifr_map.mem_end = dev->mem_end;
4360 ifr->ifr_map.base_addr = dev->base_addr;
4361 ifr->ifr_map.irq = dev->irq;
4362 ifr->ifr_map.dma = dev->dma;
4363 ifr->ifr_map.port = dev->if_port;
4364 return 0;
4366 case SIOCGIFINDEX:
4367 ifr->ifr_ifindex = dev->ifindex;
4368 return 0;
4370 case SIOCGIFTXQLEN:
4371 ifr->ifr_qlen = dev->tx_queue_len;
4372 return 0;
4374 default:
4375 /* dev_ioctl() should ensure this case
4376 * is never reached
4378 WARN_ON(1);
4379 err = -EINVAL;
4380 break;
4383 return err;
4387 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4389 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4391 int err;
4392 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4393 const struct net_device_ops *ops;
4395 if (!dev)
4396 return -ENODEV;
4398 ops = dev->netdev_ops;
4400 switch (cmd) {
4401 case SIOCSIFFLAGS: /* Set interface flags */
4402 return dev_change_flags(dev, ifr->ifr_flags);
4404 case SIOCSIFMETRIC: /* Set the metric on the interface
4405 (currently unused) */
4406 return -EOPNOTSUPP;
4408 case SIOCSIFMTU: /* Set the MTU of a device */
4409 return dev_set_mtu(dev, ifr->ifr_mtu);
4411 case SIOCSIFHWADDR:
4412 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4414 case SIOCSIFHWBROADCAST:
4415 if (ifr->ifr_hwaddr.sa_family != dev->type)
4416 return -EINVAL;
4417 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4418 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4419 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4420 return 0;
4422 case SIOCSIFMAP:
4423 if (ops->ndo_set_config) {
4424 if (!netif_device_present(dev))
4425 return -ENODEV;
4426 return ops->ndo_set_config(dev, &ifr->ifr_map);
4428 return -EOPNOTSUPP;
4430 case SIOCADDMULTI:
4431 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4432 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4433 return -EINVAL;
4434 if (!netif_device_present(dev))
4435 return -ENODEV;
4436 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4437 dev->addr_len, 1);
4439 case SIOCDELMULTI:
4440 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4441 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4442 return -EINVAL;
4443 if (!netif_device_present(dev))
4444 return -ENODEV;
4445 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4446 dev->addr_len, 1);
4448 case SIOCSIFTXQLEN:
4449 if (ifr->ifr_qlen < 0)
4450 return -EINVAL;
4451 dev->tx_queue_len = ifr->ifr_qlen;
4452 return 0;
4454 case SIOCSIFNAME:
4455 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4456 return dev_change_name(dev, ifr->ifr_newname);
4459 * Unknown or private ioctl
4461 default:
4462 if ((cmd >= SIOCDEVPRIVATE &&
4463 cmd <= SIOCDEVPRIVATE + 15) ||
4464 cmd == SIOCBONDENSLAVE ||
4465 cmd == SIOCBONDRELEASE ||
4466 cmd == SIOCBONDSETHWADDR ||
4467 cmd == SIOCBONDSLAVEINFOQUERY ||
4468 cmd == SIOCBONDINFOQUERY ||
4469 cmd == SIOCBONDCHANGEACTIVE ||
4470 cmd == SIOCGMIIPHY ||
4471 cmd == SIOCGMIIREG ||
4472 cmd == SIOCSMIIREG ||
4473 cmd == SIOCBRADDIF ||
4474 cmd == SIOCBRDELIF ||
4475 cmd == SIOCSHWTSTAMP ||
4476 cmd == SIOCWANDEV) {
4477 err = -EOPNOTSUPP;
4478 if (ops->ndo_do_ioctl) {
4479 if (netif_device_present(dev))
4480 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4481 else
4482 err = -ENODEV;
4484 } else
4485 err = -EINVAL;
4488 return err;
4492 * This function handles all "interface"-type I/O control requests. The actual
4493 * 'doing' part of this is dev_ifsioc above.
4497 * dev_ioctl - network device ioctl
4498 * @net: the applicable net namespace
4499 * @cmd: command to issue
4500 * @arg: pointer to a struct ifreq in user space
4502 * Issue ioctl functions to devices. This is normally called by the
4503 * user space syscall interfaces but can sometimes be useful for
4504 * other purposes. The return value is the return from the syscall if
4505 * positive or a negative errno code on error.
4508 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4510 struct ifreq ifr;
4511 int ret;
4512 char *colon;
4514 /* One special case: SIOCGIFCONF takes an ifconf argument
4515 and requires a shared lock, because it sleeps writing
4516 to user space.
4519 if (cmd == SIOCGIFCONF) {
4520 rtnl_lock();
4521 ret = dev_ifconf(net, (char __user *) arg);
4522 rtnl_unlock();
4523 return ret;
4525 if (cmd == SIOCGIFNAME)
4526 return dev_ifname(net, (struct ifreq __user *)arg);
4528 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4529 return -EFAULT;
4531 ifr.ifr_name[IFNAMSIZ-1] = 0;
4533 colon = strchr(ifr.ifr_name, ':');
4534 if (colon)
4535 *colon = 0;
4538 * See which interface the caller is talking about.
4541 switch (cmd) {
4543 * These ioctl calls:
4544 * - can be done by all.
4545 * - atomic and do not require locking.
4546 * - return a value
4548 case SIOCGIFFLAGS:
4549 case SIOCGIFMETRIC:
4550 case SIOCGIFMTU:
4551 case SIOCGIFHWADDR:
4552 case SIOCGIFSLAVE:
4553 case SIOCGIFMAP:
4554 case SIOCGIFINDEX:
4555 case SIOCGIFTXQLEN:
4556 dev_load(net, ifr.ifr_name);
4557 rcu_read_lock();
4558 ret = dev_ifsioc_locked(net, &ifr, cmd);
4559 rcu_read_unlock();
4560 if (!ret) {
4561 if (colon)
4562 *colon = ':';
4563 if (copy_to_user(arg, &ifr,
4564 sizeof(struct ifreq)))
4565 ret = -EFAULT;
4567 return ret;
4569 case SIOCETHTOOL:
4570 dev_load(net, ifr.ifr_name);
4571 rtnl_lock();
4572 ret = dev_ethtool(net, &ifr);
4573 rtnl_unlock();
4574 if (!ret) {
4575 if (colon)
4576 *colon = ':';
4577 if (copy_to_user(arg, &ifr,
4578 sizeof(struct ifreq)))
4579 ret = -EFAULT;
4581 return ret;
4584 * These ioctl calls:
4585 * - require superuser power.
4586 * - require strict serialization.
4587 * - return a value
4589 case SIOCGMIIPHY:
4590 case SIOCGMIIREG:
4591 case SIOCSIFNAME:
4592 if (!capable(CAP_NET_ADMIN))
4593 return -EPERM;
4594 dev_load(net, ifr.ifr_name);
4595 rtnl_lock();
4596 ret = dev_ifsioc(net, &ifr, cmd);
4597 rtnl_unlock();
4598 if (!ret) {
4599 if (colon)
4600 *colon = ':';
4601 if (copy_to_user(arg, &ifr,
4602 sizeof(struct ifreq)))
4603 ret = -EFAULT;
4605 return ret;
4608 * These ioctl calls:
4609 * - require superuser power.
4610 * - require strict serialization.
4611 * - do not return a value
4613 case SIOCSIFFLAGS:
4614 case SIOCSIFMETRIC:
4615 case SIOCSIFMTU:
4616 case SIOCSIFMAP:
4617 case SIOCSIFHWADDR:
4618 case SIOCSIFSLAVE:
4619 case SIOCADDMULTI:
4620 case SIOCDELMULTI:
4621 case SIOCSIFHWBROADCAST:
4622 case SIOCSIFTXQLEN:
4623 case SIOCSMIIREG:
4624 case SIOCBONDENSLAVE:
4625 case SIOCBONDRELEASE:
4626 case SIOCBONDSETHWADDR:
4627 case SIOCBONDCHANGEACTIVE:
4628 case SIOCBRADDIF:
4629 case SIOCBRDELIF:
4630 case SIOCSHWTSTAMP:
4631 if (!capable(CAP_NET_ADMIN))
4632 return -EPERM;
4633 /* fall through */
4634 case SIOCBONDSLAVEINFOQUERY:
4635 case SIOCBONDINFOQUERY:
4636 dev_load(net, ifr.ifr_name);
4637 rtnl_lock();
4638 ret = dev_ifsioc(net, &ifr, cmd);
4639 rtnl_unlock();
4640 return ret;
4642 case SIOCGIFMEM:
4643 /* Get the per device memory space. We can add this but
4644 * currently do not support it */
4645 case SIOCSIFMEM:
4646 /* Set the per device memory buffer space.
4647 * Not applicable in our case */
4648 case SIOCSIFLINK:
4649 return -EINVAL;
4652 * Unknown or private ioctl.
4654 default:
4655 if (cmd == SIOCWANDEV ||
4656 (cmd >= SIOCDEVPRIVATE &&
4657 cmd <= SIOCDEVPRIVATE + 15)) {
4658 dev_load(net, ifr.ifr_name);
4659 rtnl_lock();
4660 ret = dev_ifsioc(net, &ifr, cmd);
4661 rtnl_unlock();
4662 if (!ret && copy_to_user(arg, &ifr,
4663 sizeof(struct ifreq)))
4664 ret = -EFAULT;
4665 return ret;
4667 /* Take care of Wireless Extensions */
4668 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4669 return wext_handle_ioctl(net, &ifr, cmd, arg);
4670 return -EINVAL;
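/*
 * Illustrative sketch from the user-space side (assumed, for documentation
 * only): the SIOCGIFxxx requests dispatched above are reached via ioctl()
 * on an ordinary socket, e.g. to read an interface's MTU:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */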
4676 * dev_new_index - allocate an ifindex
4677 * @net: the applicable net namespace
4679 * Returns a suitable unique value for a new device interface
4680 * number. The caller must hold the rtnl semaphore or the
4681 * dev_base_lock to be sure it remains unique.
4683 static int dev_new_index(struct net *net)
4685 static int ifindex;
4686 for (;;) {
4687 if (++ifindex <= 0)
4688 ifindex = 1;
4689 if (!__dev_get_by_index(net, ifindex))
4690 return ifindex;
4694 /* Delayed registration/unregistration */
4695 static LIST_HEAD(net_todo_list);
4697 static void net_set_todo(struct net_device *dev)
4699 list_add_tail(&dev->todo_list, &net_todo_list);
4702 static void rollback_registered_many(struct list_head *head)
4704 struct net_device *dev;
4706 BUG_ON(dev_boot_phase);
4707 ASSERT_RTNL();
4709 list_for_each_entry(dev, head, unreg_list) {
4710 /* Some devices call this without ever having been
4711 * registered, as part of initialization error unwind.
4713 if (dev->reg_state == NETREG_UNINITIALIZED) {
4714 pr_debug("unregister_netdevice: device %s/%p never "
4715 "was registered\n", dev->name, dev);
4717 WARN_ON(1);
4718 return;
4721 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4723 /* If device is running, close it first. */
4724 dev_close(dev);
4726 /* And unlink it from device chain. */
4727 unlist_netdevice(dev);
4729 dev->reg_state = NETREG_UNREGISTERING;
4732 synchronize_net();
4734 list_for_each_entry(dev, head, unreg_list) {
4735 /* Shutdown queueing discipline. */
4736 dev_shutdown(dev);
4739 /* Notify protocols that we are about to destroy
4740 this device. They should clean up all of their state.
4742 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4745 * Flush the unicast and multicast chains
4747 dev_unicast_flush(dev);
4748 dev_addr_discard(dev);
4750 if (dev->netdev_ops->ndo_uninit)
4751 dev->netdev_ops->ndo_uninit(dev);
4753 /* Notifier chain MUST detach us from master device. */
4754 WARN_ON(dev->master);
4756 /* Remove entries from kobject tree */
4757 netdev_unregister_kobject(dev);
4760 synchronize_net();
4762 list_for_each_entry(dev, head, unreg_list)
4763 dev_put(dev);
4766 static void rollback_registered(struct net_device *dev)
4768 LIST_HEAD(single);
4770 list_add(&dev->unreg_list, &single);
4771 rollback_registered_many(&single);
4774 static void __netdev_init_queue_locks_one(struct net_device *dev,
4775 struct netdev_queue *dev_queue,
4776 void *_unused)
4778 spin_lock_init(&dev_queue->_xmit_lock);
4779 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4780 dev_queue->xmit_lock_owner = -1;
4783 static void netdev_init_queue_locks(struct net_device *dev)
4785 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4786 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4789 unsigned long netdev_fix_features(unsigned long features, const char *name)
4791 /* Fix illegal SG+CSUM combinations. */
4792 if ((features & NETIF_F_SG) &&
4793 !(features & NETIF_F_ALL_CSUM)) {
4794 if (name)
4795 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4796 "checksum feature.\n", name);
4797 features &= ~NETIF_F_SG;
4800 /* TSO requires that SG is present as well. */
4801 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4802 if (name)
4803 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4804 "SG feature.\n", name);
4805 features &= ~NETIF_F_TSO;
4808 if (features & NETIF_F_UFO) {
4809 if (!(features & NETIF_F_GEN_CSUM)) {
4810 if (name)
4811 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4812 "since no NETIF_F_HW_CSUM feature.\n",
4813 name);
4814 features &= ~NETIF_F_UFO;
4817 if (!(features & NETIF_F_SG)) {
4818 if (name)
4819 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4820 "since no NETIF_F_SG feature.\n", name);
4821 features &= ~NETIF_F_UFO;
4825 return features;
4827 EXPORT_SYMBOL(netdev_fix_features);
4830 * register_netdevice - register a network device
4831 * @dev: device to register
4833 * Take a completed network device structure and add it to the kernel
4834 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4835 * chain. 0 is returned on success. A negative errno code is returned
4836 * on a failure to set up the device, or if the name is a duplicate.
4838 * Callers must hold the rtnl semaphore. You may want
4839 * register_netdev() instead of this.
4841 * BUGS:
4842 * The locking appears insufficient to guarantee two parallel registers
4843 * will not get the same name.
4846 int register_netdevice(struct net_device *dev)
4848 struct hlist_head *head;
4849 struct hlist_node *p;
4850 int ret;
4851 struct net *net = dev_net(dev);
4853 BUG_ON(dev_boot_phase);
4854 ASSERT_RTNL();
4856 might_sleep();
4858 /* When net_devices are persistent, this will be fatal. */
4859 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4860 BUG_ON(!net);
4862 spin_lock_init(&dev->addr_list_lock);
4863 netdev_set_addr_lockdep_class(dev);
4864 netdev_init_queue_locks(dev);
4866 dev->iflink = -1;
4868 /* Init, if this function is available */
4869 if (dev->netdev_ops->ndo_init) {
4870 ret = dev->netdev_ops->ndo_init(dev);
4871 if (ret) {
4872 if (ret > 0)
4873 ret = -EIO;
4874 goto out;
4878 if (!dev_valid_name(dev->name)) {
4879 ret = -EINVAL;
4880 goto err_uninit;
4883 dev->ifindex = dev_new_index(net);
4884 if (dev->iflink == -1)
4885 dev->iflink = dev->ifindex;
4887 /* Check for existence of name */
4888 head = dev_name_hash(net, dev->name);
4889 hlist_for_each(p, head) {
4890 struct net_device *d
4891 = hlist_entry(p, struct net_device, name_hlist);
4892 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4893 ret = -EEXIST;
4894 goto err_uninit;
4898 /* Fix illegal checksum combinations */
4899 if ((dev->features & NETIF_F_HW_CSUM) &&
4900 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4901 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4902 dev->name);
4903 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4906 if ((dev->features & NETIF_F_NO_CSUM) &&
4907 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4908 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4909 dev->name);
4910 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4913 dev->features = netdev_fix_features(dev->features, dev->name);
4915 /* Enable software GSO if SG is supported. */
4916 if (dev->features & NETIF_F_SG)
4917 dev->features |= NETIF_F_GSO;
4919 netdev_initialize_kobject(dev);
4921 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4922 ret = notifier_to_errno(ret);
4923 if (ret)
4924 goto err_uninit;
4926 ret = netdev_register_kobject(dev);
4927 if (ret)
4928 goto err_uninit;
4929 dev->reg_state = NETREG_REGISTERED;
4932 * Default initial state at registration is that the
4933 * device is present.
4936 set_bit(__LINK_STATE_PRESENT, &dev->state);
4938 dev_init_scheduler(dev);
4939 dev_hold(dev);
4940 list_netdevice(dev);
4942 /* Notify protocols that a new device appeared. */
4943 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4944 ret = notifier_to_errno(ret);
4945 if (ret) {
4946 rollback_registered(dev);
4947 dev->reg_state = NETREG_UNREGISTERED;
4950 out:
4951 return ret;
4953 err_uninit:
4954 if (dev->netdev_ops->ndo_uninit)
4955 dev->netdev_ops->ndo_uninit(dev);
4956 goto out;
4958 EXPORT_SYMBOL(register_netdevice);
4961 * init_dummy_netdev - init a dummy network device for NAPI
4962 * @dev: device to init
4964 * This takes a network device structure and initializes the minimum
4965 * number of fields so it can be used to schedule NAPI polls without
4966 * registering a full blown interface. This is to be used by drivers
4967 * that need to tie several hardware interfaces to a single NAPI
4968 * poll scheduler due to HW limitations.
4970 int init_dummy_netdev(struct net_device *dev)
4972 /* Clear everything. Note we don't initialize spinlocks
4973 * as they aren't supposed to be taken by any of the
4974 * NAPI code and this dummy netdev is supposed to be
4975 * only ever used for NAPI polls
4977 memset(dev, 0, sizeof(struct net_device));
4979 /* make sure we BUG if trying to hit standard
4980 * register/unregister code path
4982 dev->reg_state = NETREG_DUMMY;
4984 /* initialize the ref count */
4985 atomic_set(&dev->refcnt, 1);
4987 /* NAPI wants this */
4988 INIT_LIST_HEAD(&dev->napi_list);
4990 /* a dummy interface is started by default */
4991 set_bit(__LINK_STATE_PRESENT, &dev->state);
4992 set_bit(__LINK_STATE_START, &dev->state);
4994 return 0;
4996 EXPORT_SYMBOL_GPL(init_dummy_netdev);
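/*
 * Illustrative sketch (assumed): a driver that funnels several hardware
 * channels into one poll routine can embed a dummy netdev purely as a NAPI
 * anchor. The "adapter" structure, my_poll() and the weight of 64 are
 * hypothetical.
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, my_poll, 64);
 *
 * and later, typically from the interrupt handler:
 *
 *	napi_schedule(&adapter->napi);
 */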
5000 * register_netdev - register a network device
5001 * @dev: device to register
5003 * Take a completed network device structure and add it to the kernel
5004 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5005 * chain. 0 is returned on success. A negative errno code is returned
5006 * on a failure to set up the device, or if the name is a duplicate.
5008 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5009 * and expands the device name if you passed a format string to
5010 * alloc_netdev.
5012 int register_netdev(struct net_device *dev)
5014 int err;
5016 rtnl_lock();
5019 * If the name is a format string the caller wants us to do a
5020 * name allocation.
5022 if (strchr(dev->name, '%')) {
5023 err = dev_alloc_name(dev, dev->name);
5024 if (err < 0)
5025 goto out;
5028 err = register_netdevice(dev);
5029 out:
5030 rtnl_unlock();
5031 return err;
5033 EXPORT_SYMBOL(register_netdev);
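/*
 * Illustrative sketch of the usual driver probe sequence (assumed; struct
 * my_priv and my_setup() are hypothetical):
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "foo%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	(takes the RTNL lock, expands "foo%d")
 *	if (err)
 *		free_netdev(dev);
 */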
5036 * netdev_wait_allrefs - wait until all references are gone.
5038 * This is called when unregistering network devices.
5040 * Any protocol or device that holds a reference should register
5041 * for netdevice notification, and cleanup and put back the
5042 * reference if they receive an UNREGISTER event.
5043 * We can get stuck here if buggy protocols don't correctly
5044 * call dev_put.
5046 static void netdev_wait_allrefs(struct net_device *dev)
5048 unsigned long rebroadcast_time, warning_time;
5050 rebroadcast_time = warning_time = jiffies;
5051 while (atomic_read(&dev->refcnt) != 0) {
5052 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5053 rtnl_lock();
5055 /* Rebroadcast unregister notification */
5056 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5058 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5059 &dev->state)) {
5060 /* We must not have linkwatch events
5061 * pending on unregister. If this
5062 * happens, we simply run the queue
5063 * unscheduled, resulting in a noop
5064 * for this device.
5066 linkwatch_run_queue();
5069 __rtnl_unlock();
5071 rebroadcast_time = jiffies;
5074 msleep(250);
5076 if (time_after(jiffies, warning_time + 10 * HZ)) {
5077 printk(KERN_EMERG "unregister_netdevice: "
5078 "waiting for %s to become free. Usage "
5079 "count = %d\n",
5080 dev->name, atomic_read(&dev->refcnt));
5081 warning_time = jiffies;
5086 /* The sequence is:
5088 * rtnl_lock();
5089 * ...
5090 * register_netdevice(x1);
5091 * register_netdevice(x2);
5092 * ...
5093 * unregister_netdevice(y1);
5094 * unregister_netdevice(y2);
5095 * ...
5096 * rtnl_unlock();
5097 * free_netdev(y1);
5098 * free_netdev(y2);
5100 * We are invoked by rtnl_unlock().
5101 * This allows us to deal with problems:
5102 * 1) We can delete sysfs objects which invoke hotplug
5103 * without deadlocking with linkwatch via keventd.
5104 * 2) Since we run with the RTNL semaphore not held, we can sleep
5105 * safely in order to wait for the netdev refcnt to drop to zero.
5107 * We must not return until all unregister events added during
5108 * the interval the lock was held have been completed.
5110 void netdev_run_todo(void)
5112 struct list_head list;
5114 /* Snapshot list, allow later requests */
5115 list_replace_init(&net_todo_list, &list);
5117 __rtnl_unlock();
5119 while (!list_empty(&list)) {
5120 struct net_device *dev
5121 = list_entry(list.next, struct net_device, todo_list);
5122 list_del(&dev->todo_list);
5124 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5125 printk(KERN_ERR "network todo '%s' but state %d\n",
5126 dev->name, dev->reg_state);
5127 dump_stack();
5128 continue;
5131 dev->reg_state = NETREG_UNREGISTERED;
5133 on_each_cpu(flush_backlog, dev, 1);
5135 netdev_wait_allrefs(dev);
5137 /* paranoia */
5138 BUG_ON(atomic_read(&dev->refcnt));
5139 WARN_ON(dev->ip_ptr);
5140 WARN_ON(dev->ip6_ptr);
5141 WARN_ON(dev->dn_ptr);
5143 if (dev->destructor)
5144 dev->destructor(dev);
5146 /* Free network device */
5147 kobject_put(&dev->dev.kobj);
5152 * dev_get_stats - get network device statistics
5153 * @dev: device to get statistics from
5155 * Get network statistics from device. The device driver may provide
5156 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5157 * the internal statistics structure is used.
5159 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5161 const struct net_device_ops *ops = dev->netdev_ops;
5163 if (ops->ndo_get_stats)
5164 return ops->ndo_get_stats(dev);
5165 else {
5166 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5167 struct net_device_stats *stats = &dev->stats;
5168 unsigned int i;
5169 struct netdev_queue *txq;
5171 for (i = 0; i < dev->num_tx_queues; i++) {
5172 txq = netdev_get_tx_queue(dev, i);
5173 tx_bytes += txq->tx_bytes;
5174 tx_packets += txq->tx_packets;
5175 tx_dropped += txq->tx_dropped;
5177 if (tx_bytes || tx_packets || tx_dropped) {
5178 stats->tx_bytes = tx_bytes;
5179 stats->tx_packets = tx_packets;
5180 stats->tx_dropped = tx_dropped;
5182 return stats;
5185 EXPORT_SYMBOL(dev_get_stats);
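/*
 * Illustrative sketch (assumed): readers such as the procfs and rtnetlink
 * code obtain a snapshot through this helper instead of poking dev->stats
 * directly:
 *
 *	const struct net_device_stats *stats = dev_get_stats(dev);
 *
 *	seq_printf(seq, "%s: rx %lu tx %lu\n",
 *		   dev->name, stats->rx_packets, stats->tx_packets);
 */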
5187 static void netdev_init_one_queue(struct net_device *dev,
5188 struct netdev_queue *queue,
5189 void *_unused)
5191 queue->dev = dev;
5194 static void netdev_init_queues(struct net_device *dev)
5196 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5197 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5198 spin_lock_init(&dev->tx_global_lock);
5202 * alloc_netdev_mq - allocate network device
5203 * @sizeof_priv: size of private data to allocate space for
5204 * @name: device name format string
5205 * @setup: callback to initialize device
5206 * @queue_count: the number of subqueues to allocate
5208 * Allocates a struct net_device with private data area for driver use
5209 * and performs basic initialization. Also allocates subqueue structs
5210 * for each queue on the device at the end of the netdevice.
5212 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5213 void (*setup)(struct net_device *), unsigned int queue_count)
5215 struct netdev_queue *tx;
5216 struct net_device *dev;
5217 size_t alloc_size;
5218 struct net_device *p;
5220 BUG_ON(strlen(name) >= sizeof(dev->name));
5222 alloc_size = sizeof(struct net_device);
5223 if (sizeof_priv) {
5224 /* ensure 32-byte alignment of private area */
5225 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5226 alloc_size += sizeof_priv;
5228 /* ensure 32-byte alignment of whole construct */
5229 alloc_size += NETDEV_ALIGN - 1;
5231 p = kzalloc(alloc_size, GFP_KERNEL);
5232 if (!p) {
5233 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5234 return NULL;
5237 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5238 if (!tx) {
5239 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5240 "tx qdiscs.\n");
5241 goto free_p;
5244 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5245 dev->padded = (char *)dev - (char *)p;
5247 if (dev_addr_init(dev))
5248 goto free_tx;
5250 dev_unicast_init(dev);
5252 dev_net_set(dev, &init_net);
5254 dev->_tx = tx;
5255 dev->num_tx_queues = queue_count;
5256 dev->real_num_tx_queues = queue_count;
5258 dev->gso_max_size = GSO_MAX_SIZE;
5260 netdev_init_queues(dev);
5262 INIT_LIST_HEAD(&dev->napi_list);
5263 INIT_LIST_HEAD(&dev->unreg_list);
5264 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5265 setup(dev);
5266 strcpy(dev->name, name);
5267 return dev;
5269 free_tx:
5270 kfree(tx);
5272 free_p:
5273 kfree(p);
5274 return NULL;
5276 EXPORT_SYMBOL(alloc_netdev_mq);
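/*
 * Illustrative sketch (assumed): a multiqueue Ethernet driver normally goes
 * through the alloc_etherdev_mq() wrapper, which supplies ether_setup() as
 * the setup callback. struct my_priv and the queue count of 8 are
 * hypothetical.
 *
 *	dev = alloc_etherdev_mq(sizeof(struct my_priv), 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);	(the aligned private area allocated above)
 */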
5279 * free_netdev - free network device
5280 * @dev: device
5282 * This function does the last stage of destroying an allocated device
5283 * interface. The reference to the device object is released.
5284 * If this is the last reference then it will be freed.
5286 void free_netdev(struct net_device *dev)
5288 struct napi_struct *p, *n;
5290 release_net(dev_net(dev));
5292 kfree(dev->_tx);
5294 /* Flush device addresses */
5295 dev_addr_flush(dev);
5297 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5298 netif_napi_del(p);
5300 /* Compatibility with error handling in drivers */
5301 if (dev->reg_state == NETREG_UNINITIALIZED) {
5302 kfree((char *)dev - dev->padded);
5303 return;
5306 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5307 dev->reg_state = NETREG_RELEASED;
5309 /* will free via device release */
5310 put_device(&dev->dev);
5312 EXPORT_SYMBOL(free_netdev);
5315 * synchronize_net - Synchronize with packet receive processing
5317 * Wait for packets currently being received to be done.
5318 * Does not block later packets from starting.
5320 void synchronize_net(void)
5322 might_sleep();
5323 synchronize_rcu();
5325 EXPORT_SYMBOL(synchronize_net);
5328 * unregister_netdevice_queue - remove device from the kernel
5329 * @dev: device
5330 * @head: list
5332 * This function shuts down a device interface and removes it
5333 * from the kernel tables.
5334 * If head is not NULL, the device is queued to be unregistered later.
5336 * Callers must hold the rtnl semaphore. You may want
5337 * unregister_netdev() instead of this.
5340 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5342 ASSERT_RTNL();
5344 if (head) {
5345 list_move_tail(&dev->unreg_list, head);
5346 } else {
5347 rollback_registered(dev);
5348 /* Finish processing unregister after unlock */
5349 net_set_todo(dev);
5352 EXPORT_SYMBOL(unregister_netdevice_queue);
5355 * unregister_netdevice_many - unregister many devices
5356 * @head: list of devices
5359 void unregister_netdevice_many(struct list_head *head)
5361 struct net_device *dev;
5363 if (!list_empty(head)) {
5364 rollback_registered_many(head);
5365 list_for_each_entry(dev, head, unreg_list)
5366 net_set_todo(dev);
5369 EXPORT_SYMBOL(unregister_netdevice_many);
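/*
 * Illustrative sketch (assumed): batching several unregisters lets
 * rollback_registered_many() above amortise its synchronize_net() calls
 * over the whole group. should_go() is a hypothetical predicate; the RTNL
 * lock must be held throughout.
 *
 *	LIST_HEAD(list);
 *
 *	for_each_netdev(net, dev)
 *		if (should_go(dev))
 *			unregister_netdevice_queue(dev, &list);
 *	unregister_netdevice_many(&list);
 */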
5372 * unregister_netdev - remove device from the kernel
5373 * @dev: device
5375 * This function shuts down a device interface and removes it
5376 * from the kernel tables.
5378 * This is just a wrapper for unregister_netdevice that takes
5379 * the rtnl semaphore. In general you want to use this and not
5380 * unregister_netdevice.
5382 void unregister_netdev(struct net_device *dev)
5384 rtnl_lock();
5385 unregister_netdevice(dev);
5386 rtnl_unlock();
5388 EXPORT_SYMBOL(unregister_netdev);
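/*
 * Illustrative sketch of the usual teardown order on driver removal
 * (assumed):
 *
 *	unregister_netdev(dev);		(takes RTNL, waits for references via the todo list)
 *	free_netdev(dev);		(final release once unregistration has completed)
 */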
5391 * dev_change_net_namespace - move device to a different network namespace
5392 * @dev: device
5393 * @net: network namespace
5394 * @pat: If not NULL name pattern to try if the current device name
5395 * is already taken in the destination network namespace.
5397 * This function shuts down a device interface and moves it
5398 * to a new network namespace. On success 0 is returned, on
5399 * a failure a negative errno code is returned.
5401 * Callers must hold the rtnl semaphore.
5404 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5406 char buf[IFNAMSIZ];
5407 const char *destname;
5408 int err;
5410 ASSERT_RTNL();
5412 /* Don't allow namespace local devices to be moved. */
5413 err = -EINVAL;
5414 if (dev->features & NETIF_F_NETNS_LOCAL)
5415 goto out;
5417 #ifdef CONFIG_SYSFS
5418 /* Don't allow real devices to be moved when sysfs
5419 * is enabled.
5421 err = -EINVAL;
5422 if (dev->dev.parent)
5423 goto out;
5424 #endif
5426 /* Ensure the device has been registered */
5427 err = -EINVAL;
5428 if (dev->reg_state != NETREG_REGISTERED)
5429 goto out;
5431 /* Get out if there is nothing to do */
5432 err = 0;
5433 if (net_eq(dev_net(dev), net))
5434 goto out;
5436 /* Pick the destination device name, and ensure
5437 * we can use it in the destination network namespace.
5439 err = -EEXIST;
5440 destname = dev->name;
5441 if (__dev_get_by_name(net, destname)) {
5442 /* We get here if we can't use the current device name */
5443 if (!pat)
5444 goto out;
5445 if (!dev_valid_name(pat))
5446 goto out;
5447 if (strchr(pat, '%')) {
5448 if (__dev_alloc_name(net, pat, buf) < 0)
5449 goto out;
5450 destname = buf;
5451 } else
5452 destname = pat;
5453 if (__dev_get_by_name(net, destname))
5454 goto out;
5458 * And now a mini version of register_netdevice and unregister_netdevice.
5461 /* If device is running close it first. */
5462 dev_close(dev);
5464 /* And unlink it from device chain */
5465 err = -ENODEV;
5466 unlist_netdevice(dev);
5468 synchronize_net();
5470 /* Shutdown queueing discipline. */
5471 dev_shutdown(dev);
5473 /* Notify protocols that we are about to destroy
5474 this device. They should clean up all of their state.
5476 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5479 * Flush the unicast and multicast chains
5481 dev_unicast_flush(dev);
5482 dev_addr_discard(dev);
5484 netdev_unregister_kobject(dev);
5486 /* Actually switch the network namespace */
5487 dev_net_set(dev, net);
5489 /* Assign the new device name */
5490 if (destname != dev->name)
5491 strcpy(dev->name, destname);
5493 /* If there is an ifindex conflict assign a new one */
5494 if (__dev_get_by_index(net, dev->ifindex)) {
5495 int iflink = (dev->iflink == dev->ifindex);
5496 dev->ifindex = dev_new_index(net);
5497 if (iflink)
5498 dev->iflink = dev->ifindex;
5501 /* Fixup kobjects */
5502 err = netdev_register_kobject(dev);
5503 WARN_ON(err);
5505 /* Add the device back in the hashes */
5506 list_netdevice(dev);
5508 /* Notify protocols that a new device appeared. */
5509 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5511 synchronize_net();
5512 err = 0;
5513 out:
5514 return err;
5516 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
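/*
 * Illustrative sketch (assumed), mirroring default_device_exit() below:
 * move a device into a target namespace, falling back to a "dev%d" name if
 * its current name is already taken there:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "dev%d");
 *	rtnl_unlock();
 */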
5518 static int dev_cpu_callback(struct notifier_block *nfb,
5519 unsigned long action,
5520 void *ocpu)
5522 struct sk_buff **list_skb;
5523 struct Qdisc **list_net;
5524 struct sk_buff *skb;
5525 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5526 struct softnet_data *sd, *oldsd;
5528 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5529 return NOTIFY_OK;
5531 local_irq_disable();
5532 cpu = smp_processor_id();
5533 sd = &per_cpu(softnet_data, cpu);
5534 oldsd = &per_cpu(softnet_data, oldcpu);
5536 /* Find end of our completion_queue. */
5537 list_skb = &sd->completion_queue;
5538 while (*list_skb)
5539 list_skb = &(*list_skb)->next;
5540 /* Append completion queue from offline CPU. */
5541 *list_skb = oldsd->completion_queue;
5542 oldsd->completion_queue = NULL;
5544 /* Find end of our output_queue. */
5545 list_net = &sd->output_queue;
5546 while (*list_net)
5547 list_net = &(*list_net)->next_sched;
5548 /* Append output queue from offline CPU. */
5549 *list_net = oldsd->output_queue;
5550 oldsd->output_queue = NULL;
5552 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5553 local_irq_enable();
5555 /* Process offline CPU's input_pkt_queue */
5556 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5557 netif_rx(skb);
5559 return NOTIFY_OK;
5564 * netdev_increment_features - increment feature set by one
5565 * @all: current feature set
5566 * @one: new feature set
5567 * @mask: mask feature set
5569 * Computes a new feature set after adding a device with feature set
5570 * @one to the master device with current feature set @all. Will not
5571 * enable anything that is off in @mask. Returns the new feature set.
5573 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5574 unsigned long mask)
5576 /* If device needs checksumming, downgrade to it. */
5577 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5578 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5579 else if (mask & NETIF_F_ALL_CSUM) {
5580 /* If one device supports v4/v6 checksumming, set for all. */
5581 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5582 !(all & NETIF_F_GEN_CSUM)) {
5583 all &= ~NETIF_F_ALL_CSUM;
5584 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5587 /* If one device supports hw checksumming, set for all. */
5588 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5589 all &= ~NETIF_F_ALL_CSUM;
5590 all |= NETIF_F_HW_CSUM;
5594 one |= NETIF_F_ALL_CSUM;
5596 one |= all & NETIF_F_ONE_FOR_ALL;
5597 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
5598 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5600 return all;
5602 EXPORT_SYMBOL(netdev_increment_features);
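/*
 * Illustrative sketch (assumed), roughly what a bonding-style master does
 * when recomputing its feature set from its slaves; for_each_slave() is a
 * hypothetical iterator:
 *
 *	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
 *
 *	for_each_slave(slave)
 *		features = netdev_increment_features(features,
 *						     slave->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = netdev_fix_features(features, master->name);
 */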
5604 static struct hlist_head *netdev_create_hash(void)
5606 int i;
5607 struct hlist_head *hash;
5609 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5610 if (hash != NULL)
5611 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5612 INIT_HLIST_HEAD(&hash[i]);
5614 return hash;
5617 /* Initialize per network namespace state */
5618 static int __net_init netdev_init(struct net *net)
5620 INIT_LIST_HEAD(&net->dev_base_head);
5622 net->dev_name_head = netdev_create_hash();
5623 if (net->dev_name_head == NULL)
5624 goto err_name;
5626 net->dev_index_head = netdev_create_hash();
5627 if (net->dev_index_head == NULL)
5628 goto err_idx;
5630 return 0;
5632 err_idx:
5633 kfree(net->dev_name_head);
5634 err_name:
5635 return -ENOMEM;
5639 * netdev_drivername - network driver for the device
5640 * @dev: network device
5641 * @buffer: buffer for resulting name
5642 * @len: size of buffer
5644 * Determine network driver for device.
5646 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5648 const struct device_driver *driver;
5649 const struct device *parent;
5651 if (len <= 0 || !buffer)
5652 return buffer;
5653 buffer[0] = 0;
5655 parent = dev->dev.parent;
5657 if (!parent)
5658 return buffer;
5660 driver = parent->driver;
5661 if (driver && driver->name)
5662 strlcpy(buffer, driver->name, len);
5663 return buffer;
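/*
 * Illustrative sketch (assumed): the transmit-timeout watchdog uses this to
 * name the offending driver in its warning:
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit queue timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */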
5666 static void __net_exit netdev_exit(struct net *net)
5668 kfree(net->dev_name_head);
5669 kfree(net->dev_index_head);
5672 static struct pernet_operations __net_initdata netdev_net_ops = {
5673 .init = netdev_init,
5674 .exit = netdev_exit,
5677 static void __net_exit default_device_exit(struct net *net)
5679 struct net_device *dev;
5681 * Push all of the migratable network devices back to the
5682 * initial network namespace
5684 rtnl_lock();
5685 restart:
5686 for_each_netdev(net, dev) {
5687 int err;
5688 char fb_name[IFNAMSIZ];
5690 /* Ignore unmovable devices (e.g. loopback) */
5691 if (dev->features & NETIF_F_NETNS_LOCAL)
5692 continue;
5694 /* Delete virtual devices */
5695 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5696 dev->rtnl_link_ops->dellink(dev, NULL);
5697 goto restart;
5700 /* Push remaining network devices to init_net */
5701 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5702 err = dev_change_net_namespace(dev, &init_net, fb_name);
5703 if (err) {
5704 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5705 __func__, dev->name, err);
5706 BUG();
5708 goto restart;
5710 rtnl_unlock();
5713 static struct pernet_operations __net_initdata default_device_ops = {
5714 .exit = default_device_exit,
5718 * Initialize the DEV module. At boot time this walks the device list and
5719 * unhooks any devices that fail to initialise (normally hardware not
5720 * present) and leaves us with a valid list of present and active devices.
5725 * This is called single threaded during boot, so no need
5726 * to take the rtnl semaphore.
5728 static int __init net_dev_init(void)
5730 int i, rc = -ENOMEM;
5732 BUG_ON(!dev_boot_phase);
5734 if (dev_proc_init())
5735 goto out;
5737 if (netdev_kobject_init())
5738 goto out;
5740 INIT_LIST_HEAD(&ptype_all);
5741 for (i = 0; i < PTYPE_HASH_SIZE; i++)
5742 INIT_LIST_HEAD(&ptype_base[i]);
5744 if (register_pernet_subsys(&netdev_net_ops))
5745 goto out;
5748 * Initialise the packet receive queues.
5751 for_each_possible_cpu(i) {
5752 struct softnet_data *queue;
5754 queue = &per_cpu(softnet_data, i);
5755 skb_queue_head_init(&queue->input_pkt_queue);
5756 queue->completion_queue = NULL;
5757 INIT_LIST_HEAD(&queue->poll_list);
5759 queue->backlog.poll = process_backlog;
5760 queue->backlog.weight = weight_p;
5761 queue->backlog.gro_list = NULL;
5762 queue->backlog.gro_count = 0;
5765 dev_boot_phase = 0;
5767 /* The loopback device is special: if any other network device
5768 * is present in a network namespace, the loopback device must
5769 * be present. Since we now dynamically allocate and free the
5770 * loopback device, ensure this invariant is maintained by
5771 * keeping the loopback device as the first device on the
5772 * list of network devices, so that the loopback device
5773 * is the first device that appears and the last network device
5774 * that disappears.
5776 if (register_pernet_device(&loopback_net_ops))
5777 goto out;
5779 if (register_pernet_device(&default_device_ops))
5780 goto out;
5782 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5783 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
5785 hotcpu_notifier(dev_cpu_callback, 0);
5786 dst_init();
5787 dev_mcast_init();
5788 rc = 0;
5789 out:
5790 return rc;
5793 subsys_initcall(net_dev_init);
5795 static int __init initialize_hashrnd(void)
5797 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5798 return 0;
5801 late_initcall_sync(initialize_hashrnd);