net-next: Add netif_get_num_default_rss_queues
[linux-2.6.git] / net/core/dev.c
1 /*
2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
130 #include <trace/events/net.h>
131 #include <trace/events/skb.h>
132 #include <linux/pci.h>
133 #include <linux/inetdevice.h>
134 #include <linux/cpu_rmap.h>
135 #include <linux/net_tstamp.h>
136 #include <linux/static_key.h>
137 #include <net/flow_keys.h>
139 #include "net-sysfs.h"
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
148 * The list of packet types we will receive (as opposed to discard)
149 * and the routines to invoke.
151 * Why 16. Because with 16 the only overlap we get on a hash of the
152 * low nibble of the protocol value is RARP/SNAP/X.25.
154 * NOTE: That is no longer true with the addition of VLAN tags. Not
155 * sure which should go first, but I bet it won't make much
156 * difference if we are running VLANs. The good news is that
157 * this protocol won't be in the list unless compiled in, so
158 * the average user (w/out VLANs) will not be adversely affected.
159 * --BLG
161 * 0800 IP
162 * 8100 802.1Q VLAN
163 * 0001 802.3
164 * 0002 AX.25
165 * 0004 802.2
166 * 8035 RARP
167 * 0005 SNAP
168 * 0805 X.25
169 * 0806 ARP
170 * 8137 IPX
171 * 0009 Localtalk
172 * 86DD IPv6
175 #define PTYPE_HASH_SIZE (16)
176 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
178 static DEFINE_SPINLOCK(ptype_lock);
179 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180 static struct list_head ptype_all __read_mostly; /* Taps */
183 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
184 * semaphore.
186 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
188 * Writers must hold the rtnl semaphore while they loop through the
189 * dev_base_head list, and hold dev_base_lock for writing when they do the
190 * actual updates. This allows pure readers to access the list even
191 * while a writer is preparing to update it.
193 * To put it another way, dev_base_lock is held for writing only to
194 * protect against pure readers; the rtnl semaphore provides the
195 * protection against other writers.
197 * See, for example usages, register_netdevice() and
198 * unregister_netdevice(), which must be called with the rtnl
199 * semaphore held.
201 DEFINE_RWLOCK(dev_base_lock);
202 EXPORT_SYMBOL(dev_base_lock);
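/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a pure
 * reader walking the device list under the locking rules described above.
 * The function name example_show_device_names() is invented; rcu_read_lock()
 * and for_each_netdev_rcu() are the real kernel APIs.
 */
static void example_show_device_names(struct net *net)
{
        struct net_device *dev;

        /* Pure readers may use RCU instead of taking dev_base_lock. */
        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                pr_info("dev: %s\n", dev->name);
        rcu_read_unlock();
}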
204 static inline void dev_base_seq_inc(struct net *net)
206 while (++net->dev_base_seq == 0);
209 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
211 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
213 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
216 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
218 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
221 static inline void rps_lock(struct softnet_data *sd)
223 #ifdef CONFIG_RPS
224 spin_lock(&sd->input_pkt_queue.lock);
225 #endif
228 static inline void rps_unlock(struct softnet_data *sd)
230 #ifdef CONFIG_RPS
231 spin_unlock(&sd->input_pkt_queue.lock);
232 #endif
235 /* Device list insertion */
236 static int list_netdevice(struct net_device *dev)
238 struct net *net = dev_net(dev);
240 ASSERT_RTNL();
242 write_lock_bh(&dev_base_lock);
243 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
244 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
245 hlist_add_head_rcu(&dev->index_hlist,
246 dev_index_hash(net, dev->ifindex));
247 write_unlock_bh(&dev_base_lock);
249 dev_base_seq_inc(net);
251 return 0;
254 /* Device list removal
255 * caller must respect a RCU grace period before freeing/reusing dev
257 static void unlist_netdevice(struct net_device *dev)
259 ASSERT_RTNL();
261 /* Unlink dev from the device chain */
262 write_lock_bh(&dev_base_lock);
263 list_del_rcu(&dev->dev_list);
264 hlist_del_rcu(&dev->name_hlist);
265 hlist_del_rcu(&dev->index_hlist);
266 write_unlock_bh(&dev_base_lock);
268 dev_base_seq_inc(dev_net(dev));
272 * Our notifier list
275 static RAW_NOTIFIER_HEAD(netdev_chain);
278 * Device drivers call our routines to queue packets here. We empty the
279 * queue in the local softnet handler.
282 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
283 EXPORT_PER_CPU_SYMBOL(softnet_data);
285 #ifdef CONFIG_LOCKDEP
287 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
288 * according to dev->type
290 static const unsigned short netdev_lock_type[] =
291 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
292 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
293 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
294 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
295 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
296 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
297 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
298 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
299 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
300 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
304 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
305 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
307 static const char *const netdev_lock_name[] =
308 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
309 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
310 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
311 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
312 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
313 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
314 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
315 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
316 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
317 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
318 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
319 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
320 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
321 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
322 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
324 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
325 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
327 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
329 int i;
331 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
332 if (netdev_lock_type[i] == dev_type)
333 return i;
334 /* the last key is used by default */
335 return ARRAY_SIZE(netdev_lock_type) - 1;
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339 unsigned short dev_type)
341 int i;
343 i = netdev_lock_pos(dev_type);
344 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
345 netdev_lock_name[i]);
348 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
350 int i;
352 i = netdev_lock_pos(dev->type);
353 lockdep_set_class_and_name(&dev->addr_list_lock,
354 &netdev_addr_lock_key[i],
355 netdev_lock_name[i]);
357 #else
358 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
359 unsigned short dev_type)
362 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
365 #endif
367 /*******************************************************************************
369 Protocol management and registration routines
371 *******************************************************************************/
374 * Add a protocol ID to the list. Now that the input handler is
375 * smarter we can dispense with all the messy stuff that used to be
376 * here.
378 * BEWARE!!! Protocol handlers, mangling input packets,
379 * MUST BE last in hash buckets and checking protocol handlers
380 * MUST start from promiscuous ptype_all chain in net_bh.
381 * It is true now, do not change it.
382 * Explanation follows: if protocol handler, mangling packet, will
383 * be the first on list, it is not able to sense, that packet
384 * is cloned and should be copied-on-write, so that it will
385 * change it and subsequent readers will get broken packet.
386 * --ANK (980803)
389 static inline struct list_head *ptype_head(const struct packet_type *pt)
391 if (pt->type == htons(ETH_P_ALL))
392 return &ptype_all;
393 else
394 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
398 * dev_add_pack - add packet handler
399 * @pt: packet type declaration
401 * Add a protocol handler to the networking stack. The passed &packet_type
402 * is linked into kernel lists and may not be freed until it has been
403 * removed from the kernel lists.
405 * This call does not sleep, therefore it cannot
406 * guarantee that all CPUs that are in the middle of receiving packets
407 * will see the new packet type (until the next received packet).
410 void dev_add_pack(struct packet_type *pt)
412 struct list_head *head = ptype_head(pt);
414 spin_lock(&ptype_lock);
415 list_add_rcu(&pt->list, head);
416 spin_unlock(&ptype_lock);
418 EXPORT_SYMBOL(dev_add_pack);
421 * __dev_remove_pack - remove packet handler
422 * @pt: packet type declaration
424 * Remove a protocol handler that was previously added to the kernel
425 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
426 * from the kernel lists and can be freed or reused once this function
427 * returns.
429 * The packet type might still be in use by receivers
430 * and must not be freed until after all the CPUs have gone
431 * through a quiescent state.
433 void __dev_remove_pack(struct packet_type *pt)
435 struct list_head *head = ptype_head(pt);
436 struct packet_type *pt1;
438 spin_lock(&ptype_lock);
440 list_for_each_entry(pt1, head, list) {
441 if (pt == pt1) {
442 list_del_rcu(&pt->list);
443 goto out;
447 pr_warn("dev_remove_pack: %p not found\n", pt);
448 out:
449 spin_unlock(&ptype_lock);
451 EXPORT_SYMBOL(__dev_remove_pack);
454 * dev_remove_pack - remove packet handler
455 * @pt: packet type declaration
457 * Remove a protocol handler that was previously added to the kernel
458 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
459 * from the kernel lists and can be freed or reused once this function
460 * returns.
462 * This call sleeps to guarantee that no CPU is looking at the packet
463 * type after return.
465 void dev_remove_pack(struct packet_type *pt)
467 __dev_remove_pack(pt);
469 synchronize_net();
471 EXPORT_SYMBOL(dev_remove_pack);
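/*
 * Illustrative usage sketch (editor's example, not part of dev.c):
 * registering a protocol handler for a hypothetical ethertype.  The names
 * ETH_P_EXAMPLE, example_rcv, example_pkt_type, example_init and
 * example_exit are invented for illustration; dev_add_pack() and
 * dev_remove_pack() are the real APIs documented above.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* consume the packet */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pkt_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_EXAMPLE),     /* hypothetical ethertype */
        .func = example_rcv,
};

static int __init example_init(void)
{
        dev_add_pack(&example_pkt_type);
        return 0;
}

static void __exit example_exit(void)
{
        /* dev_remove_pack() sleeps until no CPU can still see the handler */
        dev_remove_pack(&example_pkt_type);
}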
473 /******************************************************************************
475 Device Boot-time Settings Routines
477 *******************************************************************************/
479 /* Boot time configuration table */
480 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
483 * netdev_boot_setup_add - add new setup entry
484 * @name: name of the device
485 * @map: configured settings for the device
487 * Adds new setup entry to the dev_boot_setup list. The function
488 * returns 0 on error and 1 on success. This is a generic routine for
489 * all netdevices.
491 static int netdev_boot_setup_add(char *name, struct ifmap *map)
493 struct netdev_boot_setup *s;
494 int i;
496 s = dev_boot_setup;
497 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
498 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
499 memset(s[i].name, 0, sizeof(s[i].name));
500 strlcpy(s[i].name, name, IFNAMSIZ);
501 memcpy(&s[i].map, map, sizeof(s[i].map));
502 break;
506 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
510 * netdev_boot_setup_check - check boot time settings
511 * @dev: the netdevice
513 * Check boot time settings for the device.
514 * The found settings are set for the device to be used
515 * later in the device probing.
516 * Returns 0 if no settings found, 1 if they are.
518 int netdev_boot_setup_check(struct net_device *dev)
520 struct netdev_boot_setup *s = dev_boot_setup;
521 int i;
523 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
524 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
525 !strcmp(dev->name, s[i].name)) {
526 dev->irq = s[i].map.irq;
527 dev->base_addr = s[i].map.base_addr;
528 dev->mem_start = s[i].map.mem_start;
529 dev->mem_end = s[i].map.mem_end;
530 return 1;
533 return 0;
535 EXPORT_SYMBOL(netdev_boot_setup_check);
539 * netdev_boot_base - get address from boot time settings
540 * @prefix: prefix for network device
541 * @unit: id for network device
543 * Check boot time settings for the base address of device.
544 * The found settings are set for the device to be used
545 * later in the device probing.
546 * Returns 0 if no settings found.
548 unsigned long netdev_boot_base(const char *prefix, int unit)
550 const struct netdev_boot_setup *s = dev_boot_setup;
551 char name[IFNAMSIZ];
552 int i;
554 sprintf(name, "%s%d", prefix, unit);
557 * If device already registered then return base of 1
558 * to indicate not to probe for this interface
560 if (__dev_get_by_name(&init_net, name))
561 return 1;
563 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
564 if (!strcmp(name, s[i].name))
565 return s[i].map.base_addr;
566 return 0;
570 * Saves at boot time configured settings for any netdevice.
572 int __init netdev_boot_setup(char *str)
574 int ints[5];
575 struct ifmap map;
577 str = get_options(str, ARRAY_SIZE(ints), ints);
578 if (!str || !*str)
579 return 0;
581 /* Save settings */
582 memset(&map, 0, sizeof(map));
583 if (ints[0] > 0)
584 map.irq = ints[1];
585 if (ints[0] > 1)
586 map.base_addr = ints[2];
587 if (ints[0] > 2)
588 map.mem_start = ints[3];
589 if (ints[0] > 3)
590 map.mem_end = ints[4];
592 /* Add new entry to the list */
593 return netdev_boot_setup_add(str, &map);
596 __setup("netdev=", netdev_boot_setup);
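/*
 * Editor's note (not part of dev.c): the "netdev=" boot option parsed above
 * takes up to four integers followed by a device name, matching the ifmap
 * fields filled in by netdev_boot_setup().  For example, a kernel command
 * line containing
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * requests irq 9 and I/O base 0x300 for eth0, with mem_start/mem_end left
 * at zero.  The values are illustrative only.
 */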
598 /*******************************************************************************
600 Device Interface Subroutines
602 *******************************************************************************/
605 * __dev_get_by_name - find a device by its name
606 * @net: the applicable net namespace
607 * @name: name to find
609 * Find an interface by name. Must be called under RTNL semaphore
610 * or @dev_base_lock. If the name is found a pointer to the device
611 * is returned. If the name is not found then %NULL is returned. The
612 * reference counters are not incremented so the caller must be
613 * careful with locks.
616 struct net_device *__dev_get_by_name(struct net *net, const char *name)
618 struct hlist_node *p;
619 struct net_device *dev;
620 struct hlist_head *head = dev_name_hash(net, name);
622 hlist_for_each_entry(dev, p, head, name_hlist)
623 if (!strncmp(dev->name, name, IFNAMSIZ))
624 return dev;
626 return NULL;
628 EXPORT_SYMBOL(__dev_get_by_name);
631 * dev_get_by_name_rcu - find a device by its name
632 * @net: the applicable net namespace
633 * @name: name to find
635 * Find an interface by name.
636 * If the name is found a pointer to the device is returned.
637 * If the name is not found then %NULL is returned.
638 * The reference counters are not incremented so the caller must be
639 * careful with locks. The caller must hold RCU lock.
642 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
644 struct hlist_node *p;
645 struct net_device *dev;
646 struct hlist_head *head = dev_name_hash(net, name);
648 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
649 if (!strncmp(dev->name, name, IFNAMSIZ))
650 return dev;
652 return NULL;
654 EXPORT_SYMBOL(dev_get_by_name_rcu);
657 * dev_get_by_name - find a device by its name
658 * @net: the applicable net namespace
659 * @name: name to find
661 * Find an interface by name. This can be called from any
662 * context and does its own locking. The returned handle has
663 * the usage count incremented and the caller must use dev_put() to
664 * release it when it is no longer needed. %NULL is returned if no
665 * matching device is found.
668 struct net_device *dev_get_by_name(struct net *net, const char *name)
670 struct net_device *dev;
672 rcu_read_lock();
673 dev = dev_get_by_name_rcu(net, name);
674 if (dev)
675 dev_hold(dev);
676 rcu_read_unlock();
677 return dev;
679 EXPORT_SYMBOL(dev_get_by_name);
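/*
 * Illustrative usage sketch (editor's example, not part of dev.c): looking
 * up a device by name from process context.  example_use_eth0() is an
 * invented name; dev_get_by_name() and dev_put() are the real APIs
 * documented above.
 */
static int example_use_eth0(struct net *net)
{
        struct net_device *dev;

        dev = dev_get_by_name(net, "eth0");
        if (!dev)
                return -ENODEV;

        pr_info("%s: mtu %u\n", dev->name, dev->mtu);

        /* the lookup took a reference; drop it when done */
        dev_put(dev);
        return 0;
}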
682 * __dev_get_by_index - find a device by its ifindex
683 * @net: the applicable net namespace
684 * @ifindex: index of device
686 * Search for an interface by index. Returns %NULL if the device
687 * is not found or a pointer to the device. The device has not
688 * had its reference counter increased so the caller must be careful
689 * about locking. The caller must hold either the RTNL semaphore
690 * or @dev_base_lock.
693 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
695 struct hlist_node *p;
696 struct net_device *dev;
697 struct hlist_head *head = dev_index_hash(net, ifindex);
699 hlist_for_each_entry(dev, p, head, index_hlist)
700 if (dev->ifindex == ifindex)
701 return dev;
703 return NULL;
705 EXPORT_SYMBOL(__dev_get_by_index);
708 * dev_get_by_index_rcu - find a device by its ifindex
709 * @net: the applicable net namespace
710 * @ifindex: index of device
712 * Search for an interface by index. Returns %NULL if the device
713 * is not found or a pointer to the device. The device has not
714 * had its reference counter increased so the caller must be careful
715 * about locking. The caller must hold RCU lock.
718 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
720 struct hlist_node *p;
721 struct net_device *dev;
722 struct hlist_head *head = dev_index_hash(net, ifindex);
724 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
725 if (dev->ifindex == ifindex)
726 return dev;
728 return NULL;
730 EXPORT_SYMBOL(dev_get_by_index_rcu);
734 * dev_get_by_index - find a device by its ifindex
735 * @net: the applicable net namespace
736 * @ifindex: index of device
738 * Search for an interface by index. Returns NULL if the device
739 * is not found or a pointer to the device. The device returned has
740 * had a reference added and the pointer is safe until the user calls
741 * dev_put to indicate they have finished with it.
744 struct net_device *dev_get_by_index(struct net *net, int ifindex)
746 struct net_device *dev;
748 rcu_read_lock();
749 dev = dev_get_by_index_rcu(net, ifindex);
750 if (dev)
751 dev_hold(dev);
752 rcu_read_unlock();
753 return dev;
755 EXPORT_SYMBOL(dev_get_by_index);
758 * dev_getbyhwaddr_rcu - find a device by its hardware address
759 * @net: the applicable net namespace
760 * @type: media type of device
761 * @ha: hardware address
763 * Search for an interface by MAC address. Returns NULL if the device
764 * is not found or a pointer to the device.
765 * The caller must hold RCU or RTNL.
766 * The returned device has not had its ref count increased
767 * and the caller must therefore be careful about locking
771 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
772 const char *ha)
774 struct net_device *dev;
776 for_each_netdev_rcu(net, dev)
777 if (dev->type == type &&
778 !memcmp(dev->dev_addr, ha, dev->addr_len))
779 return dev;
781 return NULL;
783 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
785 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
787 struct net_device *dev;
789 ASSERT_RTNL();
790 for_each_netdev(net, dev)
791 if (dev->type == type)
792 return dev;
794 return NULL;
796 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
798 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
800 struct net_device *dev, *ret = NULL;
802 rcu_read_lock();
803 for_each_netdev_rcu(net, dev)
804 if (dev->type == type) {
805 dev_hold(dev);
806 ret = dev;
807 break;
809 rcu_read_unlock();
810 return ret;
812 EXPORT_SYMBOL(dev_getfirstbyhwtype);
815 * dev_get_by_flags_rcu - find any device with given flags
816 * @net: the applicable net namespace
817 * @if_flags: IFF_* values
818 * @mask: bitmask of bits in if_flags to check
820 * Search for any interface with the given flags. Returns NULL if a device
821 * is not found or a pointer to the device. Must be called inside
822 * rcu_read_lock(), and result refcount is unchanged.
825 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
826 unsigned short mask)
828 struct net_device *dev, *ret;
830 ret = NULL;
831 for_each_netdev_rcu(net, dev) {
832 if (((dev->flags ^ if_flags) & mask) == 0) {
833 ret = dev;
834 break;
837 return ret;
839 EXPORT_SYMBOL(dev_get_by_flags_rcu);
842 * dev_valid_name - check if name is okay for network device
843 * @name: name string
845 * Network device names need to be valid file names to
846 * allow sysfs to work. We also disallow any kind of
847 * whitespace.
849 bool dev_valid_name(const char *name)
851 if (*name == '\0')
852 return false;
853 if (strlen(name) >= IFNAMSIZ)
854 return false;
855 if (!strcmp(name, ".") || !strcmp(name, ".."))
856 return false;
858 while (*name) {
859 if (*name == '/' || isspace(*name))
860 return false;
861 name++;
863 return true;
865 EXPORT_SYMBOL(dev_valid_name);
868 * __dev_alloc_name - allocate a name for a device
869 * @net: network namespace to allocate the device name in
870 * @name: name format string
871 * @buf: scratch buffer and result name string
873 * Passed a format string - eg "lt%d" it will try and find a suitable
874 * id. It scans list of devices to build up a free map, then chooses
875 * the first empty slot. The caller must hold the dev_base or rtnl lock
876 * while allocating the name and adding the device in order to avoid
877 * duplicates.
878 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
879 * Returns the number of the unit assigned or a negative errno code.
882 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
884 int i = 0;
885 const char *p;
886 const int max_netdevices = 8*PAGE_SIZE;
887 unsigned long *inuse;
888 struct net_device *d;
890 p = strnchr(name, IFNAMSIZ-1, '%');
891 if (p) {
893 * Verify the string as this thing may have come from
894 * the user. There must be either one "%d" and no other "%"
895 * characters.
897 if (p[1] != 'd' || strchr(p + 2, '%'))
898 return -EINVAL;
900 /* Use one page as a bit array of possible slots */
901 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
902 if (!inuse)
903 return -ENOMEM;
905 for_each_netdev(net, d) {
906 if (!sscanf(d->name, name, &i))
907 continue;
908 if (i < 0 || i >= max_netdevices)
909 continue;
911 /* avoid cases where sscanf is not exact inverse of printf */
912 snprintf(buf, IFNAMSIZ, name, i);
913 if (!strncmp(buf, d->name, IFNAMSIZ))
914 set_bit(i, inuse);
917 i = find_first_zero_bit(inuse, max_netdevices);
918 free_page((unsigned long) inuse);
921 if (buf != name)
922 snprintf(buf, IFNAMSIZ, name, i);
923 if (!__dev_get_by_name(net, buf))
924 return i;
926 /* It is possible to run out of possible slots
927 * when the name is long and there isn't enough space left
928 * for the digits, or if all bits are used.
930 return -ENFILE;
934 * dev_alloc_name - allocate a name for a device
935 * @dev: device
936 * @name: name format string
938 * Passed a format string - eg "lt%d" it will try and find a suitable
939 * id. It scans list of devices to build up a free map, then chooses
940 * the first empty slot. The caller must hold the dev_base or rtnl lock
941 * while allocating the name and adding the device in order to avoid
942 * duplicates.
943 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
944 * Returns the number of the unit assigned or a negative errno code.
947 int dev_alloc_name(struct net_device *dev, const char *name)
949 char buf[IFNAMSIZ];
950 struct net *net;
951 int ret;
953 BUG_ON(!dev_net(dev));
954 net = dev_net(dev);
955 ret = __dev_alloc_name(net, name, buf);
956 if (ret >= 0)
957 strlcpy(dev->name, buf, IFNAMSIZ);
958 return ret;
960 EXPORT_SYMBOL(dev_alloc_name);
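/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a driver
 * asking for the next free "foo%d" name before registering its device.
 * "foo%d" and example_setup_name() are invented; dev_alloc_name() is the
 * real API documented above and the caller is assumed to hold rtnl_lock(),
 * as the comment above requires.
 */
static int example_setup_name(struct net_device *dev)
{
        int unit;

        /* picks the lowest unused unit, e.g. foo0, foo1, ... */
        unit = dev_alloc_name(dev, "foo%d");
        if (unit < 0)
                return unit;

        pr_info("assigned name %s (unit %d)\n", dev->name, unit);
        return 0;
}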
962 static int dev_get_valid_name(struct net_device *dev, const char *name)
964 struct net *net;
966 BUG_ON(!dev_net(dev));
967 net = dev_net(dev);
969 if (!dev_valid_name(name))
970 return -EINVAL;
972 if (strchr(name, '%'))
973 return dev_alloc_name(dev, name);
974 else if (__dev_get_by_name(net, name))
975 return -EEXIST;
976 else if (dev->name != name)
977 strlcpy(dev->name, name, IFNAMSIZ);
979 return 0;
983 * dev_change_name - change name of a device
984 * @dev: device
985 * @newname: name (or format string) must be at least IFNAMSIZ
987 * Change name of a device, can pass format strings "eth%d"
988 * for wildcarding.
990 int dev_change_name(struct net_device *dev, const char *newname)
992 char oldname[IFNAMSIZ];
993 int err = 0;
994 int ret;
995 struct net *net;
997 ASSERT_RTNL();
998 BUG_ON(!dev_net(dev));
1000 net = dev_net(dev);
1001 if (dev->flags & IFF_UP)
1002 return -EBUSY;
1004 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1005 return 0;
1007 memcpy(oldname, dev->name, IFNAMSIZ);
1009 err = dev_get_valid_name(dev, newname);
1010 if (err < 0)
1011 return err;
1013 rollback:
1014 ret = device_rename(&dev->dev, dev->name);
1015 if (ret) {
1016 memcpy(dev->name, oldname, IFNAMSIZ);
1017 return ret;
1020 write_lock_bh(&dev_base_lock);
1021 hlist_del_rcu(&dev->name_hlist);
1022 write_unlock_bh(&dev_base_lock);
1024 synchronize_rcu();
1026 write_lock_bh(&dev_base_lock);
1027 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1028 write_unlock_bh(&dev_base_lock);
1030 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1031 ret = notifier_to_errno(ret);
1033 if (ret) {
1034 /* err >= 0 after dev_alloc_name() or stores the first errno */
1035 if (err >= 0) {
1036 err = ret;
1037 memcpy(dev->name, oldname, IFNAMSIZ);
1038 goto rollback;
1039 } else {
1040 pr_err("%s: name change rollback failed: %d\n",
1041 dev->name, ret);
1045 return err;
1049 * dev_set_alias - change ifalias of a device
1050 * @dev: device
1051 * @alias: name up to IFALIASZ
1052 * @len: limit of bytes to copy from info
1054 * Set ifalias for a device,
1056 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1058 ASSERT_RTNL();
1060 if (len >= IFALIASZ)
1061 return -EINVAL;
1063 if (!len) {
1064 if (dev->ifalias) {
1065 kfree(dev->ifalias);
1066 dev->ifalias = NULL;
1068 return 0;
1071 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1072 if (!dev->ifalias)
1073 return -ENOMEM;
1075 strlcpy(dev->ifalias, alias, len+1);
1076 return len;
1081 * netdev_features_change - device changes features
1082 * @dev: device to cause notification
1084 * Called to indicate a device has changed features.
1086 void netdev_features_change(struct net_device *dev)
1088 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1090 EXPORT_SYMBOL(netdev_features_change);
1093 * netdev_state_change - device changes state
1094 * @dev: device to cause notification
1096 * Called to indicate a device has changed state. This function calls
1097 * the notifier chains for netdev_chain and sends a NEWLINK message
1098 * to the routing socket.
1100 void netdev_state_change(struct net_device *dev)
1102 if (dev->flags & IFF_UP) {
1103 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1104 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1107 EXPORT_SYMBOL(netdev_state_change);
1109 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1111 return call_netdevice_notifiers(event, dev);
1113 EXPORT_SYMBOL(netdev_bonding_change);
1116 * dev_load - load a network module
1117 * @net: the applicable net namespace
1118 * @name: name of interface
1120 * If a network interface is not present and the process has suitable
1121 * privileges this function loads the module. If module loading is not
1122 * available in this kernel then it becomes a nop.
1125 void dev_load(struct net *net, const char *name)
1127 struct net_device *dev;
1128 int no_module;
1130 rcu_read_lock();
1131 dev = dev_get_by_name_rcu(net, name);
1132 rcu_read_unlock();
1134 no_module = !dev;
1135 if (no_module && capable(CAP_NET_ADMIN))
1136 no_module = request_module("netdev-%s", name);
1137 if (no_module && capable(CAP_SYS_MODULE)) {
1138 if (!request_module("%s", name))
1139 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1140 name);
1143 EXPORT_SYMBOL(dev_load);
1145 static int __dev_open(struct net_device *dev)
1147 const struct net_device_ops *ops = dev->netdev_ops;
1148 int ret;
1150 ASSERT_RTNL();
1152 if (!netif_device_present(dev))
1153 return -ENODEV;
1155 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1156 ret = notifier_to_errno(ret);
1157 if (ret)
1158 return ret;
1160 set_bit(__LINK_STATE_START, &dev->state);
1162 if (ops->ndo_validate_addr)
1163 ret = ops->ndo_validate_addr(dev);
1165 if (!ret && ops->ndo_open)
1166 ret = ops->ndo_open(dev);
1168 if (ret)
1169 clear_bit(__LINK_STATE_START, &dev->state);
1170 else {
1171 dev->flags |= IFF_UP;
1172 net_dmaengine_get();
1173 dev_set_rx_mode(dev);
1174 dev_activate(dev);
1177 return ret;
1181 * dev_open - prepare an interface for use.
1182 * @dev: device to open
1184 * Takes a device from down to up state. The device's private open
1185 * function is invoked and then the multicast lists are loaded. Finally
1186 * the device is moved into the up state and a %NETDEV_UP message is
1187 * sent to the netdev notifier chain.
1189 * Calling this function on an active interface is a nop. On a failure
1190 * a negative errno code is returned.
1192 int dev_open(struct net_device *dev)
1194 int ret;
1196 if (dev->flags & IFF_UP)
1197 return 0;
1199 ret = __dev_open(dev);
1200 if (ret < 0)
1201 return ret;
1203 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1204 call_netdevice_notifiers(NETDEV_UP, dev);
1206 return ret;
1208 EXPORT_SYMBOL(dev_open);
1210 static int __dev_close_many(struct list_head *head)
1212 struct net_device *dev;
1214 ASSERT_RTNL();
1215 might_sleep();
1217 list_for_each_entry(dev, head, unreg_list) {
1218 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1220 clear_bit(__LINK_STATE_START, &dev->state);
1222 /* Synchronize to scheduled poll. We cannot touch poll list, it
1223 * can be even on different cpu. So just clear netif_running().
1225 * dev->stop() will invoke napi_disable() on all of its
1226 * napi_struct instances on this device.
1228 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1231 dev_deactivate_many(head);
1233 list_for_each_entry(dev, head, unreg_list) {
1234 const struct net_device_ops *ops = dev->netdev_ops;
1237 * Call the device specific close. This cannot fail.
1238 * Only if device is UP
1240 * We allow it to be called even after a DETACH hot-plug
1241 * event.
1243 if (ops->ndo_stop)
1244 ops->ndo_stop(dev);
1246 dev->flags &= ~IFF_UP;
1247 net_dmaengine_put();
1250 return 0;
1253 static int __dev_close(struct net_device *dev)
1255 int retval;
1256 LIST_HEAD(single);
1258 list_add(&dev->unreg_list, &single);
1259 retval = __dev_close_many(&single);
1260 list_del(&single);
1261 return retval;
1264 static int dev_close_many(struct list_head *head)
1266 struct net_device *dev, *tmp;
1267 LIST_HEAD(tmp_list);
1269 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1270 if (!(dev->flags & IFF_UP))
1271 list_move(&dev->unreg_list, &tmp_list);
1273 __dev_close_many(head);
1275 list_for_each_entry(dev, head, unreg_list) {
1276 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1277 call_netdevice_notifiers(NETDEV_DOWN, dev);
1280 /* rollback_registered_many needs the complete original list */
1281 list_splice(&tmp_list, head);
1282 return 0;
1286 * dev_close - shutdown an interface.
1287 * @dev: device to shutdown
1289 * This function moves an active device into down state. A
1290 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1291 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1292 * chain.
1294 int dev_close(struct net_device *dev)
1296 if (dev->flags & IFF_UP) {
1297 LIST_HEAD(single);
1299 list_add(&dev->unreg_list, &single);
1300 dev_close_many(&single);
1301 list_del(&single);
1303 return 0;
1305 EXPORT_SYMBOL(dev_close);
1309 * dev_disable_lro - disable Large Receive Offload on a device
1310 * @dev: device
1312 * Disable Large Receive Offload (LRO) on a net device. Must be
1313 * called under RTNL. This is needed if received packets may be
1314 * forwarded to another interface.
1316 void dev_disable_lro(struct net_device *dev)
1319 * If we're trying to disable lro on a vlan device
1320 * use the underlying physical device instead
1322 if (is_vlan_dev(dev))
1323 dev = vlan_dev_real_dev(dev);
1325 dev->wanted_features &= ~NETIF_F_LRO;
1326 netdev_update_features(dev);
1328 if (unlikely(dev->features & NETIF_F_LRO))
1329 netdev_WARN(dev, "failed to disable LRO!\n");
1331 EXPORT_SYMBOL(dev_disable_lro);
1334 static int dev_boot_phase = 1;
1337 * register_netdevice_notifier - register a network notifier block
1338 * @nb: notifier
1340 * Register a notifier to be called when network device events occur.
1341 * The notifier passed is linked into the kernel structures and must
1342 * not be reused until it has been unregistered. A negative errno code
1343 * is returned on a failure.
1345 * When registered all registration and up events are replayed
1346 * to the new notifier to allow device to have a race free
1347 * view of the network device list.
1350 int register_netdevice_notifier(struct notifier_block *nb)
1352 struct net_device *dev;
1353 struct net_device *last;
1354 struct net *net;
1355 int err;
1357 rtnl_lock();
1358 err = raw_notifier_chain_register(&netdev_chain, nb);
1359 if (err)
1360 goto unlock;
1361 if (dev_boot_phase)
1362 goto unlock;
1363 for_each_net(net) {
1364 for_each_netdev(net, dev) {
1365 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1366 err = notifier_to_errno(err);
1367 if (err)
1368 goto rollback;
1370 if (!(dev->flags & IFF_UP))
1371 continue;
1373 nb->notifier_call(nb, NETDEV_UP, dev);
1377 unlock:
1378 rtnl_unlock();
1379 return err;
1381 rollback:
1382 last = dev;
1383 for_each_net(net) {
1384 for_each_netdev(net, dev) {
1385 if (dev == last)
1386 goto outroll;
1388 if (dev->flags & IFF_UP) {
1389 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1390 nb->notifier_call(nb, NETDEV_DOWN, dev);
1392 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1393 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1397 outroll:
1398 raw_notifier_chain_unregister(&netdev_chain, nb);
1399 goto unlock;
1401 EXPORT_SYMBOL(register_netdevice_notifier);
1404 * unregister_netdevice_notifier - unregister a network notifier block
1405 * @nb: notifier
1407 * Unregister a notifier previously registered by
1408 * register_netdevice_notifier(). The notifier is unlinked from the
1409 * kernel structures and may then be reused. A negative errno code
1410 * is returned on a failure.
1412 * After unregistering, unregister and down device events are synthesized
1413 * for all devices on the device list to the removed notifier to remove
1414 * the need for special case cleanup code.
1417 int unregister_netdevice_notifier(struct notifier_block *nb)
1419 struct net_device *dev;
1420 struct net *net;
1421 int err;
1423 rtnl_lock();
1424 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1425 if (err)
1426 goto unlock;
1428 for_each_net(net) {
1429 for_each_netdev(net, dev) {
1430 if (dev->flags & IFF_UP) {
1431 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1432 nb->notifier_call(nb, NETDEV_DOWN, dev);
1434 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1435 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1438 unlock:
1439 rtnl_unlock();
1440 return err;
1442 EXPORT_SYMBOL(unregister_netdevice_notifier);
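/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a minimal
 * netdevice notifier.  example_netdev_event(), example_notifier and the
 * init/exit helpers are invented names; notifier_block and the
 * register/unregister calls are the real APIs documented above.  In this
 * kernel the notifier's ptr argument is the struct net_device itself.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                pr_info("%s is up\n", dev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
        .notifier_call = example_netdev_event,
};

static int __init example_notifier_init(void)
{
        /* already-registered devices are replayed as REGISTER/UP events */
        return register_netdevice_notifier(&example_notifier);
}

static void __exit example_notifier_exit(void)
{
        unregister_netdevice_notifier(&example_notifier);
}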
1445 * call_netdevice_notifiers - call all network notifier blocks
1446 * @val: value passed unmodified to notifier function
1447 * @dev: net_device pointer passed unmodified to notifier function
1449 * Call all network notifier blocks. Parameters and return value
1450 * are as for raw_notifier_call_chain().
1453 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1455 ASSERT_RTNL();
1456 return raw_notifier_call_chain(&netdev_chain, val, dev);
1458 EXPORT_SYMBOL(call_netdevice_notifiers);
1460 static struct static_key netstamp_needed __read_mostly;
1461 #ifdef HAVE_JUMP_LABEL
1462 /* We are not allowed to call static_key_slow_dec() from irq context
1463 * If net_disable_timestamp() is called from irq context, defer the
1464 * static_key_slow_dec() calls.
1466 static atomic_t netstamp_needed_deferred;
1467 #endif
1469 void net_enable_timestamp(void)
1471 #ifdef HAVE_JUMP_LABEL
1472 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1474 if (deferred) {
1475 while (--deferred)
1476 static_key_slow_dec(&netstamp_needed);
1477 return;
1479 #endif
1480 WARN_ON(in_interrupt());
1481 static_key_slow_inc(&netstamp_needed);
1483 EXPORT_SYMBOL(net_enable_timestamp);
1485 void net_disable_timestamp(void)
1487 #ifdef HAVE_JUMP_LABEL
1488 if (in_interrupt()) {
1489 atomic_inc(&netstamp_needed_deferred);
1490 return;
1492 #endif
1493 static_key_slow_dec(&netstamp_needed);
1495 EXPORT_SYMBOL(net_disable_timestamp);
1497 static inline void net_timestamp_set(struct sk_buff *skb)
1499 skb->tstamp.tv64 = 0;
1500 if (static_key_false(&netstamp_needed))
1501 __net_timestamp(skb);
1504 #define net_timestamp_check(COND, SKB) \
1505 if (static_key_false(&netstamp_needed)) { \
1506 if ((COND) && !(SKB)->tstamp.tv64) \
1507 __net_timestamp(SKB); \
1510 static int net_hwtstamp_validate(struct ifreq *ifr)
1512 struct hwtstamp_config cfg;
1513 enum hwtstamp_tx_types tx_type;
1514 enum hwtstamp_rx_filters rx_filter;
1515 int tx_type_valid = 0;
1516 int rx_filter_valid = 0;
1518 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1519 return -EFAULT;
1521 if (cfg.flags) /* reserved for future extensions */
1522 return -EINVAL;
1524 tx_type = cfg.tx_type;
1525 rx_filter = cfg.rx_filter;
1527 switch (tx_type) {
1528 case HWTSTAMP_TX_OFF:
1529 case HWTSTAMP_TX_ON:
1530 case HWTSTAMP_TX_ONESTEP_SYNC:
1531 tx_type_valid = 1;
1532 break;
1535 switch (rx_filter) {
1536 case HWTSTAMP_FILTER_NONE:
1537 case HWTSTAMP_FILTER_ALL:
1538 case HWTSTAMP_FILTER_SOME:
1539 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1540 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1541 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1542 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1543 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1544 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1545 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1546 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1547 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1548 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1549 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1550 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1551 rx_filter_valid = 1;
1552 break;
1555 if (!tx_type_valid || !rx_filter_valid)
1556 return -ERANGE;
1558 return 0;
1561 static inline bool is_skb_forwardable(struct net_device *dev,
1562 struct sk_buff *skb)
1564 unsigned int len;
1566 if (!(dev->flags & IFF_UP))
1567 return false;
1569 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1570 if (skb->len <= len)
1571 return true;
1573 /* if TSO is enabled, we don't care about the length as the packet
1574 * could be forwarded without being segmented before
1576 if (skb_is_gso(skb))
1577 return true;
1579 return false;
1583 * dev_forward_skb - loopback an skb to another netif
1585 * @dev: destination network device
1586 * @skb: buffer to forward
1588 * return values:
1589 * NET_RX_SUCCESS (no congestion)
1590 * NET_RX_DROP (packet was dropped, but freed)
1592 * dev_forward_skb can be used for injecting an skb from the
1593 * start_xmit function of one device into the receive queue
1594 * of another device.
1596 * The receiving device may be in another namespace, so
1597 * we have to clear all information in the skb that could
1598 * impact namespace isolation.
1600 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1602 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1603 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1604 atomic_long_inc(&dev->rx_dropped);
1605 kfree_skb(skb);
1606 return NET_RX_DROP;
1610 skb_orphan(skb);
1611 nf_reset(skb);
1613 if (unlikely(!is_skb_forwardable(dev, skb))) {
1614 atomic_long_inc(&dev->rx_dropped);
1615 kfree_skb(skb);
1616 return NET_RX_DROP;
1618 skb->skb_iif = 0;
1619 skb->dev = dev;
1620 skb_dst_drop(skb);
1621 skb->tstamp.tv64 = 0;
1622 skb->pkt_type = PACKET_HOST;
1623 skb->protocol = eth_type_trans(skb, dev);
1624 skb->mark = 0;
1625 secpath_reset(skb);
1626 nf_reset(skb);
1627 return netif_rx(skb);
1629 EXPORT_SYMBOL_GPL(dev_forward_skb);
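/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a
 * pair-style virtual driver handing frames from its own start_xmit to a
 * peer device's receive path, in the spirit of veth.  struct example_priv,
 * priv->peer and example_xmit() are invented; dev_forward_skb() is the real
 * API documented above.
 */
struct example_priv {
        struct net_device *peer;        /* device that should "receive" */
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        /* dev_forward_skb() scrubs the skb and queues it via netif_rx() */
        if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
                dev->stats.tx_packets++;
        else
                dev->stats.tx_dropped++;

        return NETDEV_TX_OK;
}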
1631 static inline int deliver_skb(struct sk_buff *skb,
1632 struct packet_type *pt_prev,
1633 struct net_device *orig_dev)
1635 atomic_inc(&skb->users);
1636 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1640 * Support routine. Sends outgoing frames to any network
1641 * taps currently in use.
1644 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1646 struct packet_type *ptype;
1647 struct sk_buff *skb2 = NULL;
1648 struct packet_type *pt_prev = NULL;
1650 rcu_read_lock();
1651 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1652 /* Never send packets back to the socket
1653 * they originated from - MvS (miquels@drinkel.ow.org)
1655 if ((ptype->dev == dev || !ptype->dev) &&
1656 (ptype->af_packet_priv == NULL ||
1657 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1658 if (pt_prev) {
1659 deliver_skb(skb2, pt_prev, skb->dev);
1660 pt_prev = ptype;
1661 continue;
1664 skb2 = skb_clone(skb, GFP_ATOMIC);
1665 if (!skb2)
1666 break;
1668 net_timestamp_set(skb2);
1670 /* skb->nh should be correctly
1671 set by sender, so that the second statement is
1672 just protection against buggy protocols.
1674 skb_reset_mac_header(skb2);
1676 if (skb_network_header(skb2) < skb2->data ||
1677 skb2->network_header > skb2->tail) {
1678 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1679 ntohs(skb2->protocol),
1680 dev->name);
1681 skb_reset_network_header(skb2);
1684 skb2->transport_header = skb2->network_header;
1685 skb2->pkt_type = PACKET_OUTGOING;
1686 pt_prev = ptype;
1689 if (pt_prev)
1690 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1691 rcu_read_unlock();
1694 /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1695 * @dev: Network device
1696 * @txq: number of queues available
1698 * If real_num_tx_queues is changed the tc mappings may no longer be
1699 * valid. To resolve this verify the tc mapping remains valid and if
1700 * not NULL the mapping. With no priorities mapping to this
1701 * offset/count pair it will no longer be used. In the worst case TC0
1702 * is invalid nothing can be done so disable priority mappings. It is
1703 * expected that drivers will fix this mapping if they can before
1704 * calling netif_set_real_num_tx_queues.
1706 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1708 int i;
1709 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1711 /* If TC0 is invalidated disable TC mapping */
1712 if (tc->offset + tc->count > txq) {
1713 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1714 dev->num_tc = 0;
1715 return;
1718 /* Invalidated prio to tc mappings set to TC0 */
1719 for (i = 1; i < TC_BITMASK + 1; i++) {
1720 int q = netdev_get_prio_tc_map(dev, i);
1722 tc = &dev->tc_to_txq[q];
1723 if (tc->offset + tc->count > txq) {
1724 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1725 i, q);
1726 netdev_set_prio_tc_map(dev, i, 0);
1732 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1733 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
1735 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1737 int rc;
1739 if (txq < 1 || txq > dev->num_tx_queues)
1740 return -EINVAL;
1742 if (dev->reg_state == NETREG_REGISTERED ||
1743 dev->reg_state == NETREG_UNREGISTERING) {
1744 ASSERT_RTNL();
1746 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1747 txq);
1748 if (rc)
1749 return rc;
1751 if (dev->num_tc)
1752 netif_setup_tc(dev, txq);
1754 if (txq < dev->real_num_tx_queues)
1755 qdisc_reset_all_tx_gt(dev, txq);
1758 dev->real_num_tx_queues = txq;
1759 return 0;
1761 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
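/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a driver
 * shrinking its active TX queue count at runtime, e.g. after an ethtool
 * channel change.  example_set_channels() and new_txq are invented names;
 * netif_set_real_num_tx_queues() is the real API above and must be called
 * under rtnl_lock() once the device is registered.
 */
static int example_set_channels(struct net_device *dev, unsigned int new_txq)
{
        int err;

        ASSERT_RTNL();

        err = netif_set_real_num_tx_queues(dev, new_txq);
        if (err)
                return err;

        /* hypothetical driver-side queue reconfiguration would follow here */
        return 0;
}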
1763 #ifdef CONFIG_RPS
1765 * netif_set_real_num_rx_queues - set actual number of RX queues used
1766 * @dev: Network device
1767 * @rxq: Actual number of RX queues
1769 * This must be called either with the rtnl_lock held or before
1770 * registration of the net device. Returns 0 on success, or a
1771 * negative error code. If called before registration, it always
1772 * succeeds.
1774 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1776 int rc;
1778 if (rxq < 1 || rxq > dev->num_rx_queues)
1779 return -EINVAL;
1781 if (dev->reg_state == NETREG_REGISTERED) {
1782 ASSERT_RTNL();
1784 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1785 rxq);
1786 if (rc)
1787 return rc;
1790 dev->real_num_rx_queues = rxq;
1791 return 0;
1793 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1794 #endif
1796 /* netif_get_num_default_rss_queues - default number of RSS queues
1798 * This routine should set an upper limit on the number of RSS queues
1799 * used by default by multiqueue devices.
1801 int netif_get_num_default_rss_queues(void)
1803 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1805 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
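/*
 * Illustrative usage sketch (editor's example, not part of dev.c): a
 * multiqueue driver capping its RSS queue count with the helper added
 * above.  example_hw_max_queues and example_pick_num_queues() are invented
 * names; netif_get_num_default_rss_queues() is the real helper and
 * DEFAULT_MAX_NUM_RSS_QUEUES is defined in linux/netdevice.h.
 */
static unsigned int example_pick_num_queues(unsigned int example_hw_max_queues)
{
        /* never ask for more queues than the hardware supports, and do not
         * default to more than the stack-suggested upper limit of
         * min(DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()).
         */
        return min_t(unsigned int, example_hw_max_queues,
                     netif_get_num_default_rss_queues());
}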
1807 static inline void __netif_reschedule(struct Qdisc *q)
1809 struct softnet_data *sd;
1810 unsigned long flags;
1812 local_irq_save(flags);
1813 sd = &__get_cpu_var(softnet_data);
1814 q->next_sched = NULL;
1815 *sd->output_queue_tailp = q;
1816 sd->output_queue_tailp = &q->next_sched;
1817 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1818 local_irq_restore(flags);
1821 void __netif_schedule(struct Qdisc *q)
1823 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1824 __netif_reschedule(q);
1826 EXPORT_SYMBOL(__netif_schedule);
1828 void dev_kfree_skb_irq(struct sk_buff *skb)
1830 if (atomic_dec_and_test(&skb->users)) {
1831 struct softnet_data *sd;
1832 unsigned long flags;
1834 local_irq_save(flags);
1835 sd = &__get_cpu_var(softnet_data);
1836 skb->next = sd->completion_queue;
1837 sd->completion_queue = skb;
1838 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1839 local_irq_restore(flags);
1842 EXPORT_SYMBOL(dev_kfree_skb_irq);
1844 void dev_kfree_skb_any(struct sk_buff *skb)
1846 if (in_irq() || irqs_disabled())
1847 dev_kfree_skb_irq(skb);
1848 else
1849 dev_kfree_skb(skb);
1851 EXPORT_SYMBOL(dev_kfree_skb_any);
1855 * netif_device_detach - mark device as removed
1856 * @dev: network device
1858 * Mark device as removed from system and therefore no longer available.
1860 void netif_device_detach(struct net_device *dev)
1862 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1863 netif_running(dev)) {
1864 netif_tx_stop_all_queues(dev);
1867 EXPORT_SYMBOL(netif_device_detach);
1870 * netif_device_attach - mark device as attached
1871 * @dev: network device
1873 * Mark device as attached from system and restart if needed.
1875 void netif_device_attach(struct net_device *dev)
1877 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1878 netif_running(dev)) {
1879 netif_tx_wake_all_queues(dev);
1880 __netdev_watchdog_up(dev);
1883 EXPORT_SYMBOL(netif_device_attach);
1885 static void skb_warn_bad_offload(const struct sk_buff *skb)
1887 static const netdev_features_t null_features = 0;
1888 struct net_device *dev = skb->dev;
1889 const char *driver = "";
1891 if (dev && dev->dev.parent)
1892 driver = dev_driver_string(dev->dev.parent);
1894 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1895 "gso_type=%d ip_summed=%d\n",
1896 driver, dev ? &dev->features : &null_features,
1897 skb->sk ? &skb->sk->sk_route_caps : &null_features,
1898 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1899 skb_shinfo(skb)->gso_type, skb->ip_summed);
1903 * Invalidate hardware checksum when packet is to be mangled, and
1904 * complete checksum manually on outgoing path.
1906 int skb_checksum_help(struct sk_buff *skb)
1908 __wsum csum;
1909 int ret = 0, offset;
1911 if (skb->ip_summed == CHECKSUM_COMPLETE)
1912 goto out_set_summed;
1914 if (unlikely(skb_shinfo(skb)->gso_size)) {
1915 skb_warn_bad_offload(skb);
1916 return -EINVAL;
1919 offset = skb_checksum_start_offset(skb);
1920 BUG_ON(offset >= skb_headlen(skb));
1921 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1923 offset += skb->csum_offset;
1924 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1926 if (skb_cloned(skb) &&
1927 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1928 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1929 if (ret)
1930 goto out;
1933 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1934 out_set_summed:
1935 skb->ip_summed = CHECKSUM_NONE;
1936 out:
1937 return ret;
1939 EXPORT_SYMBOL(skb_checksum_help);
1942 * skb_gso_segment - Perform segmentation on skb.
1943 * @skb: buffer to segment
1944 * @features: features for the output path (see dev->features)
1946 * This function segments the given skb and returns a list of segments.
1948 * It may return NULL if the skb requires no segmentation. This is
1949 * only possible when GSO is used for verifying header integrity.
1951 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1952 netdev_features_t features)
1954 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1955 struct packet_type *ptype;
1956 __be16 type = skb->protocol;
1957 int vlan_depth = ETH_HLEN;
1958 int err;
1960 while (type == htons(ETH_P_8021Q)) {
1961 struct vlan_hdr *vh;
1963 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1964 return ERR_PTR(-EINVAL);
1966 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1967 type = vh->h_vlan_encapsulated_proto;
1968 vlan_depth += VLAN_HLEN;
1971 skb_reset_mac_header(skb);
1972 skb->mac_len = skb->network_header - skb->mac_header;
1973 __skb_pull(skb, skb->mac_len);
1975 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1976 skb_warn_bad_offload(skb);
1978 if (skb_header_cloned(skb) &&
1979 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1980 return ERR_PTR(err);
1983 rcu_read_lock();
1984 list_for_each_entry_rcu(ptype,
1985 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1986 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1987 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1988 err = ptype->gso_send_check(skb);
1989 segs = ERR_PTR(err);
1990 if (err || skb_gso_ok(skb, features))
1991 break;
1992 __skb_push(skb, (skb->data -
1993 skb_network_header(skb)));
1995 segs = ptype->gso_segment(skb, features);
1996 break;
1999 rcu_read_unlock();
2001 __skb_push(skb, skb->data - skb_mac_header(skb));
2003 return segs;
2005 EXPORT_SYMBOL(skb_gso_segment);
2007 /* Take action when hardware reception checksum errors are detected. */
2008 #ifdef CONFIG_BUG
2009 void netdev_rx_csum_fault(struct net_device *dev)
2011 if (net_ratelimit()) {
2012 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2013 dump_stack();
2016 EXPORT_SYMBOL(netdev_rx_csum_fault);
2017 #endif
2019 /* Actually, we should eliminate this check as soon as we know, that:
2020 * 1. IOMMU is present and allows to map all the memory.
2021 * 2. No high memory really exists on this machine.
2024 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2026 #ifdef CONFIG_HIGHMEM
2027 int i;
2028 if (!(dev->features & NETIF_F_HIGHDMA)) {
2029 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2030 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2031 if (PageHighMem(skb_frag_page(frag)))
2032 return 1;
2036 if (PCI_DMA_BUS_IS_PHYS) {
2037 struct device *pdev = dev->dev.parent;
2039 if (!pdev)
2040 return 0;
2041 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2042 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2043 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2044 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2045 return 1;
2048 #endif
2049 return 0;
2052 struct dev_gso_cb {
2053 void (*destructor)(struct sk_buff *skb);
2056 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2058 static void dev_gso_skb_destructor(struct sk_buff *skb)
2060 struct dev_gso_cb *cb;
2062 do {
2063 struct sk_buff *nskb = skb->next;
2065 skb->next = nskb->next;
2066 nskb->next = NULL;
2067 kfree_skb(nskb);
2068 } while (skb->next);
2070 cb = DEV_GSO_CB(skb);
2071 if (cb->destructor)
2072 cb->destructor(skb);
2076 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2077 * @skb: buffer to segment
2078 * @features: device features as applicable to this skb
2080 * This function segments the given skb and stores the list of segments
2081 * in skb->next.
2083 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2085 struct sk_buff *segs;
2087 segs = skb_gso_segment(skb, features);
2089 /* Verifying header integrity only. */
2090 if (!segs)
2091 return 0;
2093 if (IS_ERR(segs))
2094 return PTR_ERR(segs);
2096 skb->next = segs;
2097 DEV_GSO_CB(skb)->destructor = skb->destructor;
2098 skb->destructor = dev_gso_skb_destructor;
2100 return 0;
2103 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2105 return ((features & NETIF_F_GEN_CSUM) ||
2106 ((features & NETIF_F_V4_CSUM) &&
2107 protocol == htons(ETH_P_IP)) ||
2108 ((features & NETIF_F_V6_CSUM) &&
2109 protocol == htons(ETH_P_IPV6)) ||
2110 ((features & NETIF_F_FCOE_CRC) &&
2111 protocol == htons(ETH_P_FCOE)));
2114 static netdev_features_t harmonize_features(struct sk_buff *skb,
2115 __be16 protocol, netdev_features_t features)
2117 if (!can_checksum_protocol(features, protocol)) {
2118 features &= ~NETIF_F_ALL_CSUM;
2119 features &= ~NETIF_F_SG;
2120 } else if (illegal_highdma(skb->dev, skb)) {
2121 features &= ~NETIF_F_SG;
2124 return features;
2127 netdev_features_t netif_skb_features(struct sk_buff *skb)
2129 __be16 protocol = skb->protocol;
2130 netdev_features_t features = skb->dev->features;
2132 if (protocol == htons(ETH_P_8021Q)) {
2133 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2134 protocol = veh->h_vlan_encapsulated_proto;
2135 } else if (!vlan_tx_tag_present(skb)) {
2136 return harmonize_features(skb, protocol, features);
2139 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2141 if (protocol != htons(ETH_P_8021Q)) {
2142 return harmonize_features(skb, protocol, features);
2143 } else {
2144 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2145 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2146 return harmonize_features(skb, protocol, features);
2149 EXPORT_SYMBOL(netif_skb_features);
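/*
 * Illustrative sketch, not part of dev.c: asking which features apply to one
 * skb and falling back to a software checksum when the device cannot do it,
 * mirroring the CHECKSUM_PARTIAL handling in dev_hard_start_xmit() below.
 */
static int example_tx_checksum(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* nothing to complete */

	if (features & NETIF_F_ALL_CSUM)
		return 0;			/* hardware fills in the checksum */

	skb_set_transport_header(skb, skb_checksum_start_offset(skb));
	return skb_checksum_help(skb);		/* 0 on success, negative errno on failure */
}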
2152 * Returns true if either:
2153 * 1. skb has a frag_list and the device doesn't support FRAGLIST, or
2154 * 2. skb is fragmented and the device does not support SG, or if
2155 * at least one of the fragments is in highmem and the device does
2156 * not support DMA from it.
2158 static inline int skb_needs_linearize(struct sk_buff *skb,
2159 int features)
2161 return skb_is_nonlinear(skb) &&
2162 ((skb_has_frag_list(skb) &&
2163 !(features & NETIF_F_FRAGLIST)) ||
2164 (skb_shinfo(skb)->nr_frags &&
2165 !(features & NETIF_F_SG)));
2168 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2169 struct netdev_queue *txq)
2171 const struct net_device_ops *ops = dev->netdev_ops;
2172 int rc = NETDEV_TX_OK;
2173 unsigned int skb_len;
2175 if (likely(!skb->next)) {
2176 netdev_features_t features;
2179 * If the device doesn't need skb->dst, release it right now while
2180 * it's hot in this CPU's cache.
2182 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2183 skb_dst_drop(skb);
2185 if (!list_empty(&ptype_all))
2186 dev_queue_xmit_nit(skb, dev);
2188 features = netif_skb_features(skb);
2190 if (vlan_tx_tag_present(skb) &&
2191 !(features & NETIF_F_HW_VLAN_TX)) {
2192 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2193 if (unlikely(!skb))
2194 goto out;
2196 skb->vlan_tci = 0;
2199 if (netif_needs_gso(skb, features)) {
2200 if (unlikely(dev_gso_segment(skb, features)))
2201 goto out_kfree_skb;
2202 if (skb->next)
2203 goto gso;
2204 } else {
2205 if (skb_needs_linearize(skb, features) &&
2206 __skb_linearize(skb))
2207 goto out_kfree_skb;
2209 /* If packet is not checksummed and device does not
2210 * support checksumming for this protocol, complete
2211 * checksumming here.
2213 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2214 skb_set_transport_header(skb,
2215 skb_checksum_start_offset(skb));
2216 if (!(features & NETIF_F_ALL_CSUM) &&
2217 skb_checksum_help(skb))
2218 goto out_kfree_skb;
2222 skb_len = skb->len;
2223 rc = ops->ndo_start_xmit(skb, dev);
2224 trace_net_dev_xmit(skb, rc, dev, skb_len);
2225 if (rc == NETDEV_TX_OK)
2226 txq_trans_update(txq);
2227 return rc;
2230 gso:
2231 do {
2232 struct sk_buff *nskb = skb->next;
2234 skb->next = nskb->next;
2235 nskb->next = NULL;
2238 * If the device doesn't need nskb->dst, release it right now while
2239 * it's hot in this CPU's cache.
2241 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2242 skb_dst_drop(nskb);
2244 skb_len = nskb->len;
2245 rc = ops->ndo_start_xmit(nskb, dev);
2246 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2247 if (unlikely(rc != NETDEV_TX_OK)) {
2248 if (rc & ~NETDEV_TX_MASK)
2249 goto out_kfree_gso_skb;
2250 nskb->next = skb->next;
2251 skb->next = nskb;
2252 return rc;
2254 txq_trans_update(txq);
2255 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2256 return NETDEV_TX_BUSY;
2257 } while (skb->next);
2259 out_kfree_gso_skb:
2260 if (likely(skb->next == NULL))
2261 skb->destructor = DEV_GSO_CB(skb)->destructor;
2262 out_kfree_skb:
2263 kfree_skb(skb);
2264 out:
2265 return rc;
2268 static u32 hashrnd __read_mostly;
2271 * Returns a Tx hash based on the given packet descriptor and the number
2272 * of Tx queues to use as the distribution range.
2274 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2275 unsigned int num_tx_queues)
2277 u32 hash;
2278 u16 qoffset = 0;
2279 u16 qcount = num_tx_queues;
2281 if (skb_rx_queue_recorded(skb)) {
2282 hash = skb_get_rx_queue(skb);
2283 while (unlikely(hash >= num_tx_queues))
2284 hash -= num_tx_queues;
2285 return hash;
2288 if (dev->num_tc) {
2289 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2290 qoffset = dev->tc_to_txq[tc].offset;
2291 qcount = dev->tc_to_txq[tc].count;
2294 if (skb->sk && skb->sk->sk_hash)
2295 hash = skb->sk->sk_hash;
2296 else
2297 hash = (__force u16) skb->protocol;
2298 hash = jhash_1word(hash, hashrnd);
2300 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2302 EXPORT_SYMBOL(__skb_tx_hash);
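/*
 * Illustrative note, not part of dev.c: the return statement above maps a
 * 32-bit hash uniformly onto [qoffset, qoffset + qcount) without a modulo:
 *
 *	queue = qoffset + (((u64)hash * qcount) >> 32);
 *
 * For example, with hash = 0x80000000 (mid-range) and qcount = 8:
 *	(0x80000000ULL * 8) >> 32 = 4, i.e. the middle of the queue range.
 */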
2304 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2306 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2307 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2308 dev->name, queue_index,
2309 dev->real_num_tx_queues);
2310 return 0;
2312 return queue_index;
2315 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2317 #ifdef CONFIG_XPS
2318 struct xps_dev_maps *dev_maps;
2319 struct xps_map *map;
2320 int queue_index = -1;
2322 rcu_read_lock();
2323 dev_maps = rcu_dereference(dev->xps_maps);
2324 if (dev_maps) {
2325 map = rcu_dereference(
2326 dev_maps->cpu_map[raw_smp_processor_id()]);
2327 if (map) {
2328 if (map->len == 1)
2329 queue_index = map->queues[0];
2330 else {
2331 u32 hash;
2332 if (skb->sk && skb->sk->sk_hash)
2333 hash = skb->sk->sk_hash;
2334 else
2335 hash = (__force u16) skb->protocol ^
2336 skb->rxhash;
2337 hash = jhash_1word(hash, hashrnd);
2338 queue_index = map->queues[
2339 ((u64)hash * map->len) >> 32];
2341 if (unlikely(queue_index >= dev->real_num_tx_queues))
2342 queue_index = -1;
2345 rcu_read_unlock();
2347 return queue_index;
2348 #else
2349 return -1;
2350 #endif
2353 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2354 struct sk_buff *skb)
2356 int queue_index;
2357 const struct net_device_ops *ops = dev->netdev_ops;
2359 if (dev->real_num_tx_queues == 1)
2360 queue_index = 0;
2361 else if (ops->ndo_select_queue) {
2362 queue_index = ops->ndo_select_queue(dev, skb);
2363 queue_index = dev_cap_txqueue(dev, queue_index);
2364 } else {
2365 struct sock *sk = skb->sk;
2366 queue_index = sk_tx_queue_get(sk);
2368 if (queue_index < 0 || skb->ooo_okay ||
2369 queue_index >= dev->real_num_tx_queues) {
2370 int old_index = queue_index;
2372 queue_index = get_xps_queue(dev, skb);
2373 if (queue_index < 0)
2374 queue_index = skb_tx_hash(dev, skb);
2376 if (queue_index != old_index && sk) {
2377 struct dst_entry *dst =
2378 rcu_dereference_check(sk->sk_dst_cache, 1);
2380 if (dst && skb_dst(skb) == dst)
2381 sk_tx_queue_set(sk, queue_index);
2386 skb_set_queue_mapping(skb, queue_index);
2387 return netdev_get_tx_queue(dev, queue_index);
2390 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2391 struct net_device *dev,
2392 struct netdev_queue *txq)
2394 spinlock_t *root_lock = qdisc_lock(q);
2395 bool contended;
2396 int rc;
2398 qdisc_skb_cb(skb)->pkt_len = skb->len;
2399 qdisc_calculate_pkt_len(skb, q);
2401 * Heuristic to force contended enqueues to serialize on a
2402 * separate lock before trying to get the qdisc main lock.
2403 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2404 * and dequeue packets faster.
2406 contended = qdisc_is_running(q);
2407 if (unlikely(contended))
2408 spin_lock(&q->busylock);
2410 spin_lock(root_lock);
2411 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2412 kfree_skb(skb);
2413 rc = NET_XMIT_DROP;
2414 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2415 qdisc_run_begin(q)) {
2417 * This is a work-conserving queue; there are no old skbs
2418 * waiting to be sent out; and the qdisc is not running -
2419 * xmit the skb directly.
2421 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2422 skb_dst_force(skb);
2424 qdisc_bstats_update(q, skb);
2426 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2427 if (unlikely(contended)) {
2428 spin_unlock(&q->busylock);
2429 contended = false;
2431 __qdisc_run(q);
2432 } else
2433 qdisc_run_end(q);
2435 rc = NET_XMIT_SUCCESS;
2436 } else {
2437 skb_dst_force(skb);
2438 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2439 if (qdisc_run_begin(q)) {
2440 if (unlikely(contended)) {
2441 spin_unlock(&q->busylock);
2442 contended = false;
2444 __qdisc_run(q);
2447 spin_unlock(root_lock);
2448 if (unlikely(contended))
2449 spin_unlock(&q->busylock);
2450 return rc;
2453 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2454 static void skb_update_prio(struct sk_buff *skb)
2456 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2458 if ((!skb->priority) && (skb->sk) && map)
2459 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
2461 #else
2462 #define skb_update_prio(skb)
2463 #endif
2465 static DEFINE_PER_CPU(int, xmit_recursion);
2466 #define RECURSION_LIMIT 10
2469 * dev_loopback_xmit - loop back @skb
2470 * @skb: buffer to transmit
2472 int dev_loopback_xmit(struct sk_buff *skb)
2474 skb_reset_mac_header(skb);
2475 __skb_pull(skb, skb_network_offset(skb));
2476 skb->pkt_type = PACKET_LOOPBACK;
2477 skb->ip_summed = CHECKSUM_UNNECESSARY;
2478 WARN_ON(!skb_dst(skb));
2479 skb_dst_force(skb);
2480 netif_rx_ni(skb);
2481 return 0;
2483 EXPORT_SYMBOL(dev_loopback_xmit);
2486 * dev_queue_xmit - transmit a buffer
2487 * @skb: buffer to transmit
2489 * Queue a buffer for transmission to a network device. The caller must
2490 * have set the device and priority and built the buffer before calling
2491 * this function. The function can be called from an interrupt.
2493 * A negative errno code is returned on a failure. A success does not
2494 * guarantee the frame will be transmitted as it may be dropped due
2495 * to congestion or traffic shaping.
2497 * -----------------------------------------------------------------------------------
2498 * I notice this method can also return errors from the queue disciplines,
2499 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2500 * be positive.
2502 * Regardless of the return value, the skb is consumed, so it is currently
2503 * difficult to retry a send to this method. (You can bump the ref count
2504 * before sending to hold a reference for retry if you are careful.)
2506 * When calling this method, interrupts MUST be enabled. This is because
2507 * the BH enable code must have IRQs enabled so that it will not deadlock.
2508 * --BLG
2510 int dev_queue_xmit(struct sk_buff *skb)
2512 struct net_device *dev = skb->dev;
2513 struct netdev_queue *txq;
2514 struct Qdisc *q;
2515 int rc = -ENOMEM;
2517 /* Disable soft irqs for various locks below. Also
2518 * stops preemption for RCU.
2520 rcu_read_lock_bh();
2522 skb_update_prio(skb);
2524 txq = dev_pick_tx(dev, skb);
2525 q = rcu_dereference_bh(txq->qdisc);
2527 #ifdef CONFIG_NET_CLS_ACT
2528 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2529 #endif
2530 trace_net_dev_queue(skb);
2531 if (q->enqueue) {
2532 rc = __dev_xmit_skb(skb, q, dev, txq);
2533 goto out;
2536 /* The device has no queue. Common case for software devices:
2537 loopback, all sorts of tunnels...
2539 Really, it is unlikely that netif_tx_lock protection is necessary
2540 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
2541 counters.)
2542 However, it is possible that they rely on the protection
2543 we provide here.
2545 Check this and take the lock anyway. It is not prone to deadlocks.
2546 Or take the noqueue qdisc path; it is even simpler 8)
2548 if (dev->flags & IFF_UP) {
2549 int cpu = smp_processor_id(); /* ok because BHs are off */
2551 if (txq->xmit_lock_owner != cpu) {
2553 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2554 goto recursion_alert;
2556 HARD_TX_LOCK(dev, txq, cpu);
2558 if (!netif_xmit_stopped(txq)) {
2559 __this_cpu_inc(xmit_recursion);
2560 rc = dev_hard_start_xmit(skb, dev, txq);
2561 __this_cpu_dec(xmit_recursion);
2562 if (dev_xmit_complete(rc)) {
2563 HARD_TX_UNLOCK(dev, txq);
2564 goto out;
2567 HARD_TX_UNLOCK(dev, txq);
2568 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2569 dev->name);
2570 } else {
2571 /* Recursion is detected! It is possible,
2572 * unfortunately
2574 recursion_alert:
2575 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2576 dev->name);
2580 rc = -ENETDOWN;
2581 rcu_read_unlock_bh();
2583 kfree_skb(skb);
2584 return rc;
2585 out:
2586 rcu_read_unlock_bh();
2587 return rc;
2589 EXPORT_SYMBOL(dev_queue_xmit);
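/*
 * Illustrative sketch, not part of dev.c: a minimal caller of dev_queue_xmit().
 * The skb is consumed whether or not transmission succeeds, so the caller must
 * not touch it afterwards.  example_build_frame() is a hypothetical helper
 * that fills in a complete layer-2 frame.
 */
static int example_send(struct net_device *dev, struct sk_buff *skb)
{
	int rc;

	example_build_frame(skb);		/* hypothetical: headers + payload */
	skb->dev = dev;
	skb->priority = TC_PRIO_CONTROL;

	rc = dev_queue_xmit(skb);		/* negative errno or NET_XMIT_* code */
	if (rc != NET_XMIT_SUCCESS)
		pr_debug("example: dev_queue_xmit returned %d\n", rc);
	return rc;
}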
2592 /*=======================================================================
2593 Receiver routines
2594 =======================================================================*/
2596 int netdev_max_backlog __read_mostly = 1000;
2597 int netdev_tstamp_prequeue __read_mostly = 1;
2598 int netdev_budget __read_mostly = 300;
2599 int weight_p __read_mostly = 64; /* old backlog weight */
2601 /* Called with irq disabled */
2602 static inline void ____napi_schedule(struct softnet_data *sd,
2603 struct napi_struct *napi)
2605 list_add_tail(&napi->poll_list, &sd->poll_list);
2606 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2610 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2611 * and src/dst port numbers. Sets rxhash in skb to a non-zero hash value
2612 * on success; zero indicates no valid hash. Also sets l4_rxhash in skb
2613 * if the hash is a canonical 4-tuple hash over transport ports.
2615 void __skb_get_rxhash(struct sk_buff *skb)
2617 struct flow_keys keys;
2618 u32 hash;
2620 if (!skb_flow_dissect(skb, &keys))
2621 return;
2623 if (keys.ports) {
2624 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2625 swap(keys.port16[0], keys.port16[1]);
2626 skb->l4_rxhash = 1;
2629 /* get a consistent hash (same value on both flow directions) */
2630 if ((__force u32)keys.dst < (__force u32)keys.src)
2631 swap(keys.dst, keys.src);
2633 hash = jhash_3words((__force u32)keys.dst,
2634 (__force u32)keys.src,
2635 (__force u32)keys.ports, hashrnd);
2636 if (!hash)
2637 hash = 1;
2639 skb->rxhash = hash;
2641 EXPORT_SYMBOL(__skb_get_rxhash);
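/*
 * Illustrative sketch, not part of dev.c: consumers normally go through the
 * skb_get_rxhash() wrapper, which computes the hash only when it is missing.
 * Because addresses and ports are swapped into canonical order above, both
 * directions of a flow get the same hash value.
 */
static unsigned int example_steer(struct sk_buff *skb, unsigned int nqueues)
{
	u32 hash = skb_get_rxhash(skb);		/* 0 means no usable hash */

	if (!hash)
		return 0;			/* fall back to the first queue */
	return ((u64)hash * nqueues) >> 32;	/* same scaling as __skb_tx_hash() */
}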
2643 #ifdef CONFIG_RPS
2645 /* One global table that all flow-based protocols share. */
2646 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2647 EXPORT_SYMBOL(rps_sock_flow_table);
2649 struct static_key rps_needed __read_mostly;
2651 static struct rps_dev_flow *
2652 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2653 struct rps_dev_flow *rflow, u16 next_cpu)
2655 if (next_cpu != RPS_NO_CPU) {
2656 #ifdef CONFIG_RFS_ACCEL
2657 struct netdev_rx_queue *rxqueue;
2658 struct rps_dev_flow_table *flow_table;
2659 struct rps_dev_flow *old_rflow;
2660 u32 flow_id;
2661 u16 rxq_index;
2662 int rc;
2664 /* Should we steer this flow to a different hardware queue? */
2665 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2666 !(dev->features & NETIF_F_NTUPLE))
2667 goto out;
2668 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2669 if (rxq_index == skb_get_rx_queue(skb))
2670 goto out;
2672 rxqueue = dev->_rx + rxq_index;
2673 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2674 if (!flow_table)
2675 goto out;
2676 flow_id = skb->rxhash & flow_table->mask;
2677 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2678 rxq_index, flow_id);
2679 if (rc < 0)
2680 goto out;
2681 old_rflow = rflow;
2682 rflow = &flow_table->flows[flow_id];
2683 rflow->filter = rc;
2684 if (old_rflow->filter == rflow->filter)
2685 old_rflow->filter = RPS_NO_FILTER;
2686 out:
2687 #endif
2688 rflow->last_qtail =
2689 per_cpu(softnet_data, next_cpu).input_queue_head;
2692 rflow->cpu = next_cpu;
2693 return rflow;
2697 * get_rps_cpu is called from netif_receive_skb and returns the target
2698 * CPU from the RPS map of the receiving queue for a given skb.
2699 * rcu_read_lock must be held on entry.
2701 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2702 struct rps_dev_flow **rflowp)
2704 struct netdev_rx_queue *rxqueue;
2705 struct rps_map *map;
2706 struct rps_dev_flow_table *flow_table;
2707 struct rps_sock_flow_table *sock_flow_table;
2708 int cpu = -1;
2709 u16 tcpu;
2711 if (skb_rx_queue_recorded(skb)) {
2712 u16 index = skb_get_rx_queue(skb);
2713 if (unlikely(index >= dev->real_num_rx_queues)) {
2714 WARN_ONCE(dev->real_num_rx_queues > 1,
2715 "%s received packet on queue %u, but number "
2716 "of RX queues is %u\n",
2717 dev->name, index, dev->real_num_rx_queues);
2718 goto done;
2720 rxqueue = dev->_rx + index;
2721 } else
2722 rxqueue = dev->_rx;
2724 map = rcu_dereference(rxqueue->rps_map);
2725 if (map) {
2726 if (map->len == 1 &&
2727 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2728 tcpu = map->cpus[0];
2729 if (cpu_online(tcpu))
2730 cpu = tcpu;
2731 goto done;
2733 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2734 goto done;
2737 skb_reset_network_header(skb);
2738 if (!skb_get_rxhash(skb))
2739 goto done;
2741 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2742 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2743 if (flow_table && sock_flow_table) {
2744 u16 next_cpu;
2745 struct rps_dev_flow *rflow;
2747 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2748 tcpu = rflow->cpu;
2750 next_cpu = sock_flow_table->ents[skb->rxhash &
2751 sock_flow_table->mask];
2754 * If the desired CPU (where last recvmsg was done) is
2755 * different from current CPU (one in the rx-queue flow
2756 * table entry), switch if one of the following holds:
2757 * - Current CPU is unset (equal to RPS_NO_CPU).
2758 * - Current CPU is offline.
2759 * - The current CPU's queue tail has advanced beyond the
2760 * last packet that was enqueued using this table entry.
2761 * This guarantees that all previous packets for the flow
2762 * have been dequeued, thus preserving in order delivery.
2764 if (unlikely(tcpu != next_cpu) &&
2765 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2766 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2767 rflow->last_qtail)) >= 0))
2768 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2770 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2771 *rflowp = rflow;
2772 cpu = tcpu;
2773 goto done;
2777 if (map) {
2778 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2780 if (cpu_online(tcpu)) {
2781 cpu = tcpu;
2782 goto done;
2786 done:
2787 return cpu;
2790 #ifdef CONFIG_RFS_ACCEL
2793 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2794 * @dev: Device on which the filter was set
2795 * @rxq_index: RX queue index
2796 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2797 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2799 * Drivers that implement ndo_rx_flow_steer() should periodically call
2800 * this function for each installed filter and remove the filters for
2801 * which it returns %true.
2803 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2804 u32 flow_id, u16 filter_id)
2806 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2807 struct rps_dev_flow_table *flow_table;
2808 struct rps_dev_flow *rflow;
2809 bool expire = true;
2810 int cpu;
2812 rcu_read_lock();
2813 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2814 if (flow_table && flow_id <= flow_table->mask) {
2815 rflow = &flow_table->flows[flow_id];
2816 cpu = ACCESS_ONCE(rflow->cpu);
2817 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2818 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2819 rflow->last_qtail) <
2820 (int)(10 * flow_table->mask)))
2821 expire = false;
2823 rcu_read_unlock();
2824 return expire;
2826 EXPORT_SYMBOL(rps_may_expire_flow);
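/*
 * Illustrative sketch, not part of dev.c: a driver that implements
 * ndo_rx_flow_steer() might age its filter table like this from a periodic
 * work item.  struct example_filter and example_remove_filter() are
 * hypothetical driver-private pieces.
 */
static void example_expire_filters(struct net_device *dev,
				   struct example_filter *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id))
			example_remove_filter(dev, &tbl[i]);	/* hypothetical */
	}
}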
2828 #endif /* CONFIG_RFS_ACCEL */
2830 /* Called from hardirq (IPI) context */
2831 static void rps_trigger_softirq(void *data)
2833 struct softnet_data *sd = data;
2835 ____napi_schedule(sd, &sd->backlog);
2836 sd->received_rps++;
2839 #endif /* CONFIG_RPS */
2842 * Check if this softnet_data structure belongs to another CPU.
2843 * If so, queue it on our IPI list and return 1.
2844 * Otherwise return 0.
2846 static int rps_ipi_queued(struct softnet_data *sd)
2848 #ifdef CONFIG_RPS
2849 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2851 if (sd != mysd) {
2852 sd->rps_ipi_next = mysd->rps_ipi_list;
2853 mysd->rps_ipi_list = sd;
2855 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2856 return 1;
2858 #endif /* CONFIG_RPS */
2859 return 0;
2863 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2864 * queue (may be a remote CPU queue).
2866 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2867 unsigned int *qtail)
2869 struct softnet_data *sd;
2870 unsigned long flags;
2872 sd = &per_cpu(softnet_data, cpu);
2874 local_irq_save(flags);
2876 rps_lock(sd);
2877 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2878 if (skb_queue_len(&sd->input_pkt_queue)) {
2879 enqueue:
2880 __skb_queue_tail(&sd->input_pkt_queue, skb);
2881 input_queue_tail_incr_save(sd, qtail);
2882 rps_unlock(sd);
2883 local_irq_restore(flags);
2884 return NET_RX_SUCCESS;
2887 /* Schedule NAPI for the backlog device.
2888 * We can use a non-atomic operation since we own the queue lock.
2890 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2891 if (!rps_ipi_queued(sd))
2892 ____napi_schedule(sd, &sd->backlog);
2894 goto enqueue;
2897 sd->dropped++;
2898 rps_unlock(sd);
2900 local_irq_restore(flags);
2902 atomic_long_inc(&skb->dev->rx_dropped);
2903 kfree_skb(skb);
2904 return NET_RX_DROP;
2908 * netif_rx - post buffer to the network code
2909 * @skb: buffer to post
2911 * This function receives a packet from a device driver and queues it for
2912 * the upper (protocol) levels to process. It always succeeds. The buffer
2913 * may be dropped during processing for congestion control or by the
2914 * protocol layers.
2916 * return values:
2917 * NET_RX_SUCCESS (no congestion)
2918 * NET_RX_DROP (packet was dropped)
2922 int netif_rx(struct sk_buff *skb)
2924 int ret;
2926 /* if netpoll wants it, pretend we never saw it */
2927 if (netpoll_rx(skb))
2928 return NET_RX_DROP;
2930 net_timestamp_check(netdev_tstamp_prequeue, skb);
2932 trace_netif_rx(skb);
2933 #ifdef CONFIG_RPS
2934 if (static_key_false(&rps_needed)) {
2935 struct rps_dev_flow voidflow, *rflow = &voidflow;
2936 int cpu;
2938 preempt_disable();
2939 rcu_read_lock();
2941 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2942 if (cpu < 0)
2943 cpu = smp_processor_id();
2945 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2947 rcu_read_unlock();
2948 preempt_enable();
2949 } else
2950 #endif
2952 unsigned int qtail;
2953 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2954 put_cpu();
2956 return ret;
2958 EXPORT_SYMBOL(netif_rx);
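/*
 * Illustrative sketch, not part of dev.c: a non-NAPI receive path handing a
 * frame to netif_rx() from its interrupt handler.  The copy from "data" and
 * the lack of statistics handling are simplifications to keep the example
 * short.
 */
static void example_rx_frame(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;					/* driver counts this as a drop */

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (netif_rx(skb) == NET_RX_DROP)
		pr_debug("%s: backlog full, packet dropped\n", dev->name);
}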
2960 int netif_rx_ni(struct sk_buff *skb)
2962 int err;
2964 preempt_disable();
2965 err = netif_rx(skb);
2966 if (local_softirq_pending())
2967 do_softirq();
2968 preempt_enable();
2970 return err;
2972 EXPORT_SYMBOL(netif_rx_ni);
2974 static void net_tx_action(struct softirq_action *h)
2976 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2978 if (sd->completion_queue) {
2979 struct sk_buff *clist;
2981 local_irq_disable();
2982 clist = sd->completion_queue;
2983 sd->completion_queue = NULL;
2984 local_irq_enable();
2986 while (clist) {
2987 struct sk_buff *skb = clist;
2988 clist = clist->next;
2990 WARN_ON(atomic_read(&skb->users));
2991 trace_kfree_skb(skb, net_tx_action);
2992 __kfree_skb(skb);
2996 if (sd->output_queue) {
2997 struct Qdisc *head;
2999 local_irq_disable();
3000 head = sd->output_queue;
3001 sd->output_queue = NULL;
3002 sd->output_queue_tailp = &sd->output_queue;
3003 local_irq_enable();
3005 while (head) {
3006 struct Qdisc *q = head;
3007 spinlock_t *root_lock;
3009 head = head->next_sched;
3011 root_lock = qdisc_lock(q);
3012 if (spin_trylock(root_lock)) {
3013 smp_mb__before_clear_bit();
3014 clear_bit(__QDISC_STATE_SCHED,
3015 &q->state);
3016 qdisc_run(q);
3017 spin_unlock(root_lock);
3018 } else {
3019 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3020 &q->state)) {
3021 __netif_reschedule(q);
3022 } else {
3023 smp_mb__before_clear_bit();
3024 clear_bit(__QDISC_STATE_SCHED,
3025 &q->state);
3032 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3033 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3034 /* This hook is defined here for ATM LANE */
3035 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3036 unsigned char *addr) __read_mostly;
3037 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3038 #endif
3040 #ifdef CONFIG_NET_CLS_ACT
3041 /* TODO: Maybe we should just force sch_ingress to be compiled in
3042 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for a few useless
3043 * instructions (a compare and two extra stores) when it is not enabled
3044 * but CONFIG_NET_CLS_ACT is.
3045 * NOTE: This doesn't remove any functionality; if you don't have
3046 * the ingress scheduler, you just can't add policies on ingress.
3049 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3051 struct net_device *dev = skb->dev;
3052 u32 ttl = G_TC_RTTL(skb->tc_verd);
3053 int result = TC_ACT_OK;
3054 struct Qdisc *q;
3056 if (unlikely(MAX_RED_LOOP < ttl++)) {
3057 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3058 skb->skb_iif, dev->ifindex);
3059 return TC_ACT_SHOT;
3062 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3063 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3065 q = rxq->qdisc;
3066 if (q != &noop_qdisc) {
3067 spin_lock(qdisc_lock(q));
3068 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3069 result = qdisc_enqueue_root(skb, q);
3070 spin_unlock(qdisc_lock(q));
3073 return result;
3076 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3077 struct packet_type **pt_prev,
3078 int *ret, struct net_device *orig_dev)
3080 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3082 if (!rxq || rxq->qdisc == &noop_qdisc)
3083 goto out;
3085 if (*pt_prev) {
3086 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3087 *pt_prev = NULL;
3090 switch (ing_filter(skb, rxq)) {
3091 case TC_ACT_SHOT:
3092 case TC_ACT_STOLEN:
3093 kfree_skb(skb);
3094 return NULL;
3097 out:
3098 skb->tc_verd = 0;
3099 return skb;
3101 #endif
3104 * netdev_rx_handler_register - register receive handler
3105 * @dev: device to register a handler for
3106 * @rx_handler: receive handler to register
3107 * @rx_handler_data: data pointer that is used by rx handler
3109 * Register a receive handler for a device. This handler will then be
3110 * called from __netif_receive_skb. A negative errno code is returned
3111 * on a failure.
3113 * The caller must hold the rtnl_mutex.
3115 * For a general description of rx_handler, see enum rx_handler_result.
3117 int netdev_rx_handler_register(struct net_device *dev,
3118 rx_handler_func_t *rx_handler,
3119 void *rx_handler_data)
3121 ASSERT_RTNL();
3123 if (dev->rx_handler)
3124 return -EBUSY;
3126 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3127 rcu_assign_pointer(dev->rx_handler, rx_handler);
3129 return 0;
3131 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
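/*
 * Illustrative sketch, not part of dev.c: how bridge/bonding-style code
 * attaches an rx_handler to a port device.  struct example_port and
 * example_port_rx() are hypothetical; only the registration pattern and the
 * RTNL requirement are the point here.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (example_port_rx(port, pskb))	/* hypothetical: true if consumed */
		return RX_HANDLER_CONSUMED;
	return RX_HANDLER_PASS;
}

static int example_attach_port(struct net_device *dev, struct example_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, port);
	rtnl_unlock();
	return err;
}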
3134 * netdev_rx_handler_unregister - unregister receive handler
3135 * @dev: device to unregister a handler from
3137 * Unregister a receive handler from a device.
3139 * The caller must hold the rtnl_mutex.
3141 void netdev_rx_handler_unregister(struct net_device *dev)
3144 ASSERT_RTNL();
3145 RCU_INIT_POINTER(dev->rx_handler, NULL);
3146 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3148 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3150 static int __netif_receive_skb(struct sk_buff *skb)
3152 struct packet_type *ptype, *pt_prev;
3153 rx_handler_func_t *rx_handler;
3154 struct net_device *orig_dev;
3155 struct net_device *null_or_dev;
3156 bool deliver_exact = false;
3157 int ret = NET_RX_DROP;
3158 __be16 type;
3160 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3162 trace_netif_receive_skb(skb);
3164 /* if we've gotten here through NAPI, check netpoll */
3165 if (netpoll_receive_skb(skb))
3166 return NET_RX_DROP;
3168 if (!skb->skb_iif)
3169 skb->skb_iif = skb->dev->ifindex;
3170 orig_dev = skb->dev;
3172 skb_reset_network_header(skb);
3173 skb_reset_transport_header(skb);
3174 skb_reset_mac_len(skb);
3176 pt_prev = NULL;
3178 rcu_read_lock();
3180 another_round:
3182 __this_cpu_inc(softnet_data.processed);
3184 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3185 skb = vlan_untag(skb);
3186 if (unlikely(!skb))
3187 goto out;
3190 #ifdef CONFIG_NET_CLS_ACT
3191 if (skb->tc_verd & TC_NCLS) {
3192 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3193 goto ncls;
3195 #endif
3197 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3198 if (!ptype->dev || ptype->dev == skb->dev) {
3199 if (pt_prev)
3200 ret = deliver_skb(skb, pt_prev, orig_dev);
3201 pt_prev = ptype;
3205 #ifdef CONFIG_NET_CLS_ACT
3206 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3207 if (!skb)
3208 goto out;
3209 ncls:
3210 #endif
3212 rx_handler = rcu_dereference(skb->dev->rx_handler);
3213 if (vlan_tx_tag_present(skb)) {
3214 if (pt_prev) {
3215 ret = deliver_skb(skb, pt_prev, orig_dev);
3216 pt_prev = NULL;
3218 if (vlan_do_receive(&skb, !rx_handler))
3219 goto another_round;
3220 else if (unlikely(!skb))
3221 goto out;
3224 if (rx_handler) {
3225 if (pt_prev) {
3226 ret = deliver_skb(skb, pt_prev, orig_dev);
3227 pt_prev = NULL;
3229 switch (rx_handler(&skb)) {
3230 case RX_HANDLER_CONSUMED:
3231 goto out;
3232 case RX_HANDLER_ANOTHER:
3233 goto another_round;
3234 case RX_HANDLER_EXACT:
3235 deliver_exact = true;
3236 case RX_HANDLER_PASS:
3237 break;
3238 default:
3239 BUG();
3243 /* deliver only exact match when indicated */
3244 null_or_dev = deliver_exact ? skb->dev : NULL;
3246 type = skb->protocol;
3247 list_for_each_entry_rcu(ptype,
3248 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3249 if (ptype->type == type &&
3250 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3251 ptype->dev == orig_dev)) {
3252 if (pt_prev)
3253 ret = deliver_skb(skb, pt_prev, orig_dev);
3254 pt_prev = ptype;
3258 if (pt_prev) {
3259 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3260 } else {
3261 atomic_long_inc(&skb->dev->rx_dropped);
3262 kfree_skb(skb);
3263 /* Jamal, now you will not be able to escape explaining
3264 * to me how you were going to use this. :-)
3266 ret = NET_RX_DROP;
3269 out:
3270 rcu_read_unlock();
3271 return ret;
3275 * netif_receive_skb - process receive buffer from network
3276 * @skb: buffer to process
3278 * netif_receive_skb() is the main receive data processing function.
3279 * It always succeeds. The buffer may be dropped during processing
3280 * for congestion control or by the protocol layers.
3282 * This function may only be called from softirq context and interrupts
3283 * should be enabled.
3285 * Return values (usually ignored):
3286 * NET_RX_SUCCESS: no congestion
3287 * NET_RX_DROP: packet was dropped
3289 int netif_receive_skb(struct sk_buff *skb)
3291 net_timestamp_check(netdev_tstamp_prequeue, skb);
3293 if (skb_defer_rx_timestamp(skb))
3294 return NET_RX_SUCCESS;
3296 #ifdef CONFIG_RPS
3297 if (static_key_false(&rps_needed)) {
3298 struct rps_dev_flow voidflow, *rflow = &voidflow;
3299 int cpu, ret;
3301 rcu_read_lock();
3303 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3305 if (cpu >= 0) {
3306 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3307 rcu_read_unlock();
3308 return ret;
3310 rcu_read_unlock();
3312 #endif
3313 return __netif_receive_skb(skb);
3315 EXPORT_SYMBOL(netif_receive_skb);
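/*
 * Illustrative sketch, not part of dev.c: the usual caller of
 * netif_receive_skb() is a NAPI ->poll() routine running in softirq context.
 * example_rx_pending() and example_get_rx_skb() stand in for hypothetical
 * driver ring-buffer helpers.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	int work = 0;

	while (work < budget && example_rx_pending(dev)) {
		struct sk_buff *skb = example_get_rx_skb(dev);

		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		work++;
	}

	if (work < budget)
		napi_complete(napi);		/* driver re-enables its rx interrupt here */
	return work;
}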
3317 /* Network device is going away, flush any packets still pending
3318 * Called with irqs disabled.
3320 static void flush_backlog(void *arg)
3322 struct net_device *dev = arg;
3323 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3324 struct sk_buff *skb, *tmp;
3326 rps_lock(sd);
3327 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3328 if (skb->dev == dev) {
3329 __skb_unlink(skb, &sd->input_pkt_queue);
3330 kfree_skb(skb);
3331 input_queue_head_incr(sd);
3334 rps_unlock(sd);
3336 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3337 if (skb->dev == dev) {
3338 __skb_unlink(skb, &sd->process_queue);
3339 kfree_skb(skb);
3340 input_queue_head_incr(sd);
3345 static int napi_gro_complete(struct sk_buff *skb)
3347 struct packet_type *ptype;
3348 __be16 type = skb->protocol;
3349 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3350 int err = -ENOENT;
3352 if (NAPI_GRO_CB(skb)->count == 1) {
3353 skb_shinfo(skb)->gso_size = 0;
3354 goto out;
3357 rcu_read_lock();
3358 list_for_each_entry_rcu(ptype, head, list) {
3359 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3360 continue;
3362 err = ptype->gro_complete(skb);
3363 break;
3365 rcu_read_unlock();
3367 if (err) {
3368 WARN_ON(&ptype->list == head);
3369 kfree_skb(skb);
3370 return NET_RX_SUCCESS;
3373 out:
3374 return netif_receive_skb(skb);
3377 inline void napi_gro_flush(struct napi_struct *napi)
3379 struct sk_buff *skb, *next;
3381 for (skb = napi->gro_list; skb; skb = next) {
3382 next = skb->next;
3383 skb->next = NULL;
3384 napi_gro_complete(skb);
3387 napi->gro_count = 0;
3388 napi->gro_list = NULL;
3390 EXPORT_SYMBOL(napi_gro_flush);
3392 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3394 struct sk_buff **pp = NULL;
3395 struct packet_type *ptype;
3396 __be16 type = skb->protocol;
3397 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3398 int same_flow;
3399 int mac_len;
3400 enum gro_result ret;
3402 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3403 goto normal;
3405 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3406 goto normal;
3408 rcu_read_lock();
3409 list_for_each_entry_rcu(ptype, head, list) {
3410 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3411 continue;
3413 skb_set_network_header(skb, skb_gro_offset(skb));
3414 mac_len = skb->network_header - skb->mac_header;
3415 skb->mac_len = mac_len;
3416 NAPI_GRO_CB(skb)->same_flow = 0;
3417 NAPI_GRO_CB(skb)->flush = 0;
3418 NAPI_GRO_CB(skb)->free = 0;
3420 pp = ptype->gro_receive(&napi->gro_list, skb);
3421 break;
3423 rcu_read_unlock();
3425 if (&ptype->list == head)
3426 goto normal;
3428 same_flow = NAPI_GRO_CB(skb)->same_flow;
3429 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3431 if (pp) {
3432 struct sk_buff *nskb = *pp;
3434 *pp = nskb->next;
3435 nskb->next = NULL;
3436 napi_gro_complete(nskb);
3437 napi->gro_count--;
3440 if (same_flow)
3441 goto ok;
3443 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3444 goto normal;
3446 napi->gro_count++;
3447 NAPI_GRO_CB(skb)->count = 1;
3448 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3449 skb->next = napi->gro_list;
3450 napi->gro_list = skb;
3451 ret = GRO_HELD;
3453 pull:
3454 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3455 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3457 BUG_ON(skb->end - skb->tail < grow);
3459 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3461 skb->tail += grow;
3462 skb->data_len -= grow;
3464 skb_shinfo(skb)->frags[0].page_offset += grow;
3465 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3467 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3468 skb_frag_unref(skb, 0);
3469 memmove(skb_shinfo(skb)->frags,
3470 skb_shinfo(skb)->frags + 1,
3471 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3476 return ret;
3478 normal:
3479 ret = GRO_NORMAL;
3480 goto pull;
3482 EXPORT_SYMBOL(dev_gro_receive);
3484 static inline gro_result_t
3485 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3487 struct sk_buff *p;
3488 unsigned int maclen = skb->dev->hard_header_len;
3490 for (p = napi->gro_list; p; p = p->next) {
3491 unsigned long diffs;
3493 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3494 diffs |= p->vlan_tci ^ skb->vlan_tci;
3495 if (maclen == ETH_HLEN)
3496 diffs |= compare_ether_header(skb_mac_header(p),
3497 skb_gro_mac_header(skb));
3498 else if (!diffs)
3499 diffs = memcmp(skb_mac_header(p),
3500 skb_gro_mac_header(skb),
3501 maclen);
3502 NAPI_GRO_CB(p)->same_flow = !diffs;
3503 NAPI_GRO_CB(p)->flush = 0;
3506 return dev_gro_receive(napi, skb);
3509 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3511 switch (ret) {
3512 case GRO_NORMAL:
3513 if (netif_receive_skb(skb))
3514 ret = GRO_DROP;
3515 break;
3517 case GRO_DROP:
3518 kfree_skb(skb);
3519 break;
3521 case GRO_MERGED_FREE:
3522 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3523 kmem_cache_free(skbuff_head_cache, skb);
3524 else
3525 __kfree_skb(skb);
3526 break;
3528 case GRO_HELD:
3529 case GRO_MERGED:
3530 break;
3533 return ret;
3535 EXPORT_SYMBOL(napi_skb_finish);
3537 void skb_gro_reset_offset(struct sk_buff *skb)
3539 NAPI_GRO_CB(skb)->data_offset = 0;
3540 NAPI_GRO_CB(skb)->frag0 = NULL;
3541 NAPI_GRO_CB(skb)->frag0_len = 0;
3543 if (skb->mac_header == skb->tail &&
3544 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3545 NAPI_GRO_CB(skb)->frag0 =
3546 skb_frag_address(&skb_shinfo(skb)->frags[0]);
3547 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3550 EXPORT_SYMBOL(skb_gro_reset_offset);
3552 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3554 skb_gro_reset_offset(skb);
3556 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3558 EXPORT_SYMBOL(napi_gro_receive);
3560 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3562 __skb_pull(skb, skb_headlen(skb));
3563 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3564 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3565 skb->vlan_tci = 0;
3566 skb->dev = napi->dev;
3567 skb->skb_iif = 0;
3569 napi->skb = skb;
3572 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3574 struct sk_buff *skb = napi->skb;
3576 if (!skb) {
3577 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3578 if (skb)
3579 napi->skb = skb;
3581 return skb;
3583 EXPORT_SYMBOL(napi_get_frags);
3585 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3586 gro_result_t ret)
3588 switch (ret) {
3589 case GRO_NORMAL:
3590 case GRO_HELD:
3591 skb->protocol = eth_type_trans(skb, skb->dev);
3593 if (ret == GRO_HELD)
3594 skb_gro_pull(skb, -ETH_HLEN);
3595 else if (netif_receive_skb(skb))
3596 ret = GRO_DROP;
3597 break;
3599 case GRO_DROP:
3600 case GRO_MERGED_FREE:
3601 napi_reuse_skb(napi, skb);
3602 break;
3604 case GRO_MERGED:
3605 break;
3608 return ret;
3610 EXPORT_SYMBOL(napi_frags_finish);
3612 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3614 struct sk_buff *skb = napi->skb;
3615 struct ethhdr *eth;
3616 unsigned int hlen;
3617 unsigned int off;
3619 napi->skb = NULL;
3621 skb_reset_mac_header(skb);
3622 skb_gro_reset_offset(skb);
3624 off = skb_gro_offset(skb);
3625 hlen = off + sizeof(*eth);
3626 eth = skb_gro_header_fast(skb, off);
3627 if (skb_gro_header_hard(skb, hlen)) {
3628 eth = skb_gro_header_slow(skb, hlen, off);
3629 if (unlikely(!eth)) {
3630 napi_reuse_skb(napi, skb);
3631 skb = NULL;
3632 goto out;
3636 skb_gro_pull(skb, sizeof(*eth));
3639 * This works because the only protocols we care about don't require
3640 * special handling. We'll fix it up properly at the end.
3642 skb->protocol = eth->h_proto;
3644 out:
3645 return skb;
3648 gro_result_t napi_gro_frags(struct napi_struct *napi)
3650 struct sk_buff *skb = napi_frags_skb(napi);
3652 if (!skb)
3653 return GRO_DROP;
3655 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3657 EXPORT_SYMBOL(napi_gro_frags);
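/*
 * Illustrative sketch, not part of dev.c: the napi_get_frags()/napi_gro_frags()
 * pattern used by drivers that receive directly into pages.  The page, offset
 * and length come from a hypothetical rx descriptor; the truesize accounting
 * is simplified.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);			/* out of memory: drop the buffer */
		return;
	}

	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);			/* GRO takes over; napi->skb is recycled on failure */
}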
3660 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
3661 * Note: called with local irq disabled, but exits with local irq enabled.
3663 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3665 #ifdef CONFIG_RPS
3666 struct softnet_data *remsd = sd->rps_ipi_list;
3668 if (remsd) {
3669 sd->rps_ipi_list = NULL;
3671 local_irq_enable();
3673 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3674 while (remsd) {
3675 struct softnet_data *next = remsd->rps_ipi_next;
3677 if (cpu_online(remsd->cpu))
3678 __smp_call_function_single(remsd->cpu,
3679 &remsd->csd, 0);
3680 remsd = next;
3682 } else
3683 #endif
3684 local_irq_enable();
3687 static int process_backlog(struct napi_struct *napi, int quota)
3689 int work = 0;
3690 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3692 #ifdef CONFIG_RPS
3693 /* Check if we have pending IPIs; it's better to send them now
3694 * rather than waiting for net_rx_action() to end.
3696 if (sd->rps_ipi_list) {
3697 local_irq_disable();
3698 net_rps_action_and_irq_enable(sd);
3700 #endif
3701 napi->weight = weight_p;
3702 local_irq_disable();
3703 while (work < quota) {
3704 struct sk_buff *skb;
3705 unsigned int qlen;
3707 while ((skb = __skb_dequeue(&sd->process_queue))) {
3708 local_irq_enable();
3709 __netif_receive_skb(skb);
3710 local_irq_disable();
3711 input_queue_head_incr(sd);
3712 if (++work >= quota) {
3713 local_irq_enable();
3714 return work;
3718 rps_lock(sd);
3719 qlen = skb_queue_len(&sd->input_pkt_queue);
3720 if (qlen)
3721 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3722 &sd->process_queue);
3724 if (qlen < quota - work) {
3726 * Inline a custom version of __napi_complete():
3727 * only the current CPU owns and manipulates this napi,
3728 * and NAPI_STATE_SCHED is the only possible flag set on the backlog,
3729 * so we can use a plain write instead of clear_bit()
3730 * and we don't need an smp_mb() memory barrier.
3732 list_del(&napi->poll_list);
3733 napi->state = 0;
3735 quota = work + qlen;
3737 rps_unlock(sd);
3739 local_irq_enable();
3741 return work;
3745 * __napi_schedule - schedule for receive
3746 * @n: entry to schedule
3748 * The entry's receive function will be scheduled to run
3750 void __napi_schedule(struct napi_struct *n)
3752 unsigned long flags;
3754 local_irq_save(flags);
3755 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3756 local_irq_restore(flags);
3758 EXPORT_SYMBOL(__napi_schedule);
3760 void __napi_complete(struct napi_struct *n)
3762 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3763 BUG_ON(n->gro_list);
3765 list_del(&n->poll_list);
3766 smp_mb__before_clear_bit();
3767 clear_bit(NAPI_STATE_SCHED, &n->state);
3769 EXPORT_SYMBOL(__napi_complete);
3771 void napi_complete(struct napi_struct *n)
3773 unsigned long flags;
3776 * Don't let napi dequeue from the CPU poll list
3777 * just in case it's running on a different CPU.
3779 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3780 return;
3782 napi_gro_flush(n);
3783 local_irq_save(flags);
3784 __napi_complete(n);
3785 local_irq_restore(flags);
3787 EXPORT_SYMBOL(napi_complete);
3789 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3790 int (*poll)(struct napi_struct *, int), int weight)
3792 INIT_LIST_HEAD(&napi->poll_list);
3793 napi->gro_count = 0;
3794 napi->gro_list = NULL;
3795 napi->skb = NULL;
3796 napi->poll = poll;
3797 napi->weight = weight;
3798 list_add(&napi->dev_list, &dev->napi_list);
3799 napi->dev = dev;
3800 #ifdef CONFIG_NETPOLL
3801 spin_lock_init(&napi->poll_lock);
3802 napi->poll_owner = -1;
3803 #endif
3804 set_bit(NAPI_STATE_SCHED, &napi->state);
3806 EXPORT_SYMBOL(netif_napi_add);
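/*
 * Illustrative sketch, not part of dev.c: NAPI registration at probe time and
 * the matching interrupt handler.  struct example_priv and
 * example_disable_rx_irq() are hypothetical; the poll routine would look like
 * the example_poll() sketch after netif_receive_skb() above.
 */
static void example_setup_napi(struct net_device *dev, struct example_priv *priv)
{
	netif_napi_add(dev, &priv->napi, example_poll, 64);	/* 64 is a typical weight */
	napi_enable(&priv->napi);
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_disable_rx_irq(priv);		/* hypothetical: poll() re-enables it */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}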
3808 void netif_napi_del(struct napi_struct *napi)
3810 struct sk_buff *skb, *next;
3812 list_del_init(&napi->dev_list);
3813 napi_free_frags(napi);
3815 for (skb = napi->gro_list; skb; skb = next) {
3816 next = skb->next;
3817 skb->next = NULL;
3818 kfree_skb(skb);
3821 napi->gro_list = NULL;
3822 napi->gro_count = 0;
3824 EXPORT_SYMBOL(netif_napi_del);
3826 static void net_rx_action(struct softirq_action *h)
3828 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3829 unsigned long time_limit = jiffies + 2;
3830 int budget = netdev_budget;
3831 void *have;
3833 local_irq_disable();
3835 while (!list_empty(&sd->poll_list)) {
3836 struct napi_struct *n;
3837 int work, weight;
3839 /* If the softirq window is exhausted then punt.
3840 * Allow this to run for 2 jiffies, which allows
3841 * an average latency of 1.5/HZ.
3843 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3844 goto softnet_break;
3846 local_irq_enable();
3848 /* Even though interrupts have been re-enabled, this
3849 * access is safe because interrupts can only add new
3850 * entries to the tail of this list, and only ->poll()
3851 * calls can remove this head entry from the list.
3853 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3855 have = netpoll_poll_lock(n);
3857 weight = n->weight;
3859 /* This NAPI_STATE_SCHED test is for avoiding a race
3860 * with netpoll's poll_napi(). Only the entity which
3861 * obtains the lock and sees NAPI_STATE_SCHED set will
3862 * actually make the ->poll() call. Therefore we avoid
3863 * accidentally calling ->poll() when NAPI is not scheduled.
3865 work = 0;
3866 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3867 work = n->poll(n, weight);
3868 trace_napi_poll(n);
3871 WARN_ON_ONCE(work > weight);
3873 budget -= work;
3875 local_irq_disable();
3877 /* Drivers must not modify the NAPI state if they
3878 * consume the entire weight. In such cases this code
3879 * still "owns" the NAPI instance and therefore can
3880 * move the instance around on the list at-will.
3882 if (unlikely(work == weight)) {
3883 if (unlikely(napi_disable_pending(n))) {
3884 local_irq_enable();
3885 napi_complete(n);
3886 local_irq_disable();
3887 } else
3888 list_move_tail(&n->poll_list, &sd->poll_list);
3891 netpoll_poll_unlock(have);
3893 out:
3894 net_rps_action_and_irq_enable(sd);
3896 #ifdef CONFIG_NET_DMA
3898 * There may not be any more sk_buffs coming right now, so push
3899 * any pending DMA copies to hardware
3901 dma_issue_pending_all();
3902 #endif
3904 return;
3906 softnet_break:
3907 sd->time_squeeze++;
3908 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3909 goto out;
3912 static gifconf_func_t *gifconf_list[NPROTO];
3915 * register_gifconf - register a SIOCGIF handler
3916 * @family: Address family
3917 * @gifconf: Function handler
3919 * Register protocol dependent address dumping routines. The handler
3920 * that is passed must not be freed or reused until it has been replaced
3921 * by another handler.
3923 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3925 if (family >= NPROTO)
3926 return -EINVAL;
3927 gifconf_list[family] = gifconf;
3928 return 0;
3930 EXPORT_SYMBOL(register_gifconf);
3934 * Map an interface index to its name (SIOCGIFNAME)
3938 * We need this ioctl for efficient implementation of the
3939 * if_indextoname() function required by the IPv6 API. Without
3940 * it, we would have to search all the interfaces to find a
3941 * match. --pb
3944 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3946 struct net_device *dev;
3947 struct ifreq ifr;
3950 * Fetch the caller's info block.
3953 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3954 return -EFAULT;
3956 rcu_read_lock();
3957 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3958 if (!dev) {
3959 rcu_read_unlock();
3960 return -ENODEV;
3963 strcpy(ifr.ifr_name, dev->name);
3964 rcu_read_unlock();
3966 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3967 return -EFAULT;
3968 return 0;
3972 * Perform a SIOCGIFCONF call. This structure will change
3973 * size eventually, and there is nothing I can do about it.
3974 * Thus we will need a 'compatibility mode'.
3977 static int dev_ifconf(struct net *net, char __user *arg)
3979 struct ifconf ifc;
3980 struct net_device *dev;
3981 char __user *pos;
3982 int len;
3983 int total;
3984 int i;
3987 * Fetch the caller's info block.
3990 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3991 return -EFAULT;
3993 pos = ifc.ifc_buf;
3994 len = ifc.ifc_len;
3997 * Loop over the interfaces, and write an info block for each.
4000 total = 0;
4001 for_each_netdev(net, dev) {
4002 for (i = 0; i < NPROTO; i++) {
4003 if (gifconf_list[i]) {
4004 int done;
4005 if (!pos)
4006 done = gifconf_list[i](dev, NULL, 0);
4007 else
4008 done = gifconf_list[i](dev, pos + total,
4009 len - total);
4010 if (done < 0)
4011 return -EFAULT;
4012 total += done;
4018 * All done. Write the updated control block back to the caller.
4020 ifc.ifc_len = total;
4023 * Both BSD and Solaris return 0 here, so we do too.
4025 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4028 #ifdef CONFIG_PROC_FS
4030 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4032 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4033 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4034 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4036 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4038 struct net *net = seq_file_net(seq);
4039 struct net_device *dev;
4040 struct hlist_node *p;
4041 struct hlist_head *h;
4042 unsigned int count = 0, offset = get_offset(*pos);
4044 h = &net->dev_name_head[get_bucket(*pos)];
4045 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4046 if (++count == offset)
4047 return dev;
4050 return NULL;
4053 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4055 struct net_device *dev;
4056 unsigned int bucket;
4058 do {
4059 dev = dev_from_same_bucket(seq, pos);
4060 if (dev)
4061 return dev;
4063 bucket = get_bucket(*pos) + 1;
4064 *pos = set_bucket_offset(bucket, 1);
4065 } while (bucket < NETDEV_HASHENTRIES);
4067 return NULL;
4071 * This is invoked by the /proc filesystem handler to display a device
4072 * in detail.
4074 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4075 __acquires(RCU)
4077 rcu_read_lock();
4078 if (!*pos)
4079 return SEQ_START_TOKEN;
4081 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4082 return NULL;
4084 return dev_from_bucket(seq, pos);
4087 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4089 ++*pos;
4090 return dev_from_bucket(seq, pos);
4093 void dev_seq_stop(struct seq_file *seq, void *v)
4094 __releases(RCU)
4096 rcu_read_unlock();
4099 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4101 struct rtnl_link_stats64 temp;
4102 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4104 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4105 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4106 dev->name, stats->rx_bytes, stats->rx_packets,
4107 stats->rx_errors,
4108 stats->rx_dropped + stats->rx_missed_errors,
4109 stats->rx_fifo_errors,
4110 stats->rx_length_errors + stats->rx_over_errors +
4111 stats->rx_crc_errors + stats->rx_frame_errors,
4112 stats->rx_compressed, stats->multicast,
4113 stats->tx_bytes, stats->tx_packets,
4114 stats->tx_errors, stats->tx_dropped,
4115 stats->tx_fifo_errors, stats->collisions,
4116 stats->tx_carrier_errors +
4117 stats->tx_aborted_errors +
4118 stats->tx_window_errors +
4119 stats->tx_heartbeat_errors,
4120 stats->tx_compressed);
4124 * Called from the PROCfs module. This now uses the new arbitrary sized
4125 * /proc/net interface to create /proc/net/dev
4127 static int dev_seq_show(struct seq_file *seq, void *v)
4129 if (v == SEQ_START_TOKEN)
4130 seq_puts(seq, "Inter-| Receive "
4131 " | Transmit\n"
4132 " face |bytes packets errs drop fifo frame "
4133 "compressed multicast|bytes packets errs "
4134 "drop fifo colls carrier compressed\n");
4135 else
4136 dev_seq_printf_stats(seq, v);
4137 return 0;
4140 static struct softnet_data *softnet_get_online(loff_t *pos)
4142 struct softnet_data *sd = NULL;
4144 while (*pos < nr_cpu_ids)
4145 if (cpu_online(*pos)) {
4146 sd = &per_cpu(softnet_data, *pos);
4147 break;
4148 } else
4149 ++*pos;
4150 return sd;
4153 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4155 return softnet_get_online(pos);
4158 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4160 ++*pos;
4161 return softnet_get_online(pos);
4164 static void softnet_seq_stop(struct seq_file *seq, void *v)
4168 static int softnet_seq_show(struct seq_file *seq, void *v)
4170 struct softnet_data *sd = v;
4172 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4173 sd->processed, sd->dropped, sd->time_squeeze, 0,
4174 0, 0, 0, 0, /* was fastroute */
4175 sd->cpu_collision, sd->received_rps);
4176 return 0;
4179 static const struct seq_operations dev_seq_ops = {
4180 .start = dev_seq_start,
4181 .next = dev_seq_next,
4182 .stop = dev_seq_stop,
4183 .show = dev_seq_show,
4186 static int dev_seq_open(struct inode *inode, struct file *file)
4188 return seq_open_net(inode, file, &dev_seq_ops,
4189 sizeof(struct seq_net_private));
4192 static const struct file_operations dev_seq_fops = {
4193 .owner = THIS_MODULE,
4194 .open = dev_seq_open,
4195 .read = seq_read,
4196 .llseek = seq_lseek,
4197 .release = seq_release_net,
4200 static const struct seq_operations softnet_seq_ops = {
4201 .start = softnet_seq_start,
4202 .next = softnet_seq_next,
4203 .stop = softnet_seq_stop,
4204 .show = softnet_seq_show,
4207 static int softnet_seq_open(struct inode *inode, struct file *file)
4209 return seq_open(file, &softnet_seq_ops);
4212 static const struct file_operations softnet_seq_fops = {
4213 .owner = THIS_MODULE,
4214 .open = softnet_seq_open,
4215 .read = seq_read,
4216 .llseek = seq_lseek,
4217 .release = seq_release,
4220 static void *ptype_get_idx(loff_t pos)
4222 struct packet_type *pt = NULL;
4223 loff_t i = 0;
4224 int t;
4226 list_for_each_entry_rcu(pt, &ptype_all, list) {
4227 if (i == pos)
4228 return pt;
4229 ++i;
4232 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4233 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4234 if (i == pos)
4235 return pt;
4236 ++i;
4239 return NULL;
4242 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4243 __acquires(RCU)
4245 rcu_read_lock();
4246 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4249 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4251 struct packet_type *pt;
4252 struct list_head *nxt;
4253 int hash;
4255 ++*pos;
4256 if (v == SEQ_START_TOKEN)
4257 return ptype_get_idx(0);
4259 pt = v;
4260 nxt = pt->list.next;
4261 if (pt->type == htons(ETH_P_ALL)) {
4262 if (nxt != &ptype_all)
4263 goto found;
4264 hash = 0;
4265 nxt = ptype_base[0].next;
4266 } else
4267 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4269 while (nxt == &ptype_base[hash]) {
4270 if (++hash >= PTYPE_HASH_SIZE)
4271 return NULL;
4272 nxt = ptype_base[hash].next;
4274 found:
4275 return list_entry(nxt, struct packet_type, list);
4278 static void ptype_seq_stop(struct seq_file *seq, void *v)
4279 __releases(RCU)
4281 rcu_read_unlock();
4284 static int ptype_seq_show(struct seq_file *seq, void *v)
4286 struct packet_type *pt = v;
4288 if (v == SEQ_START_TOKEN)
4289 seq_puts(seq, "Type Device Function\n");
4290 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4291 if (pt->type == htons(ETH_P_ALL))
4292 seq_puts(seq, "ALL ");
4293 else
4294 seq_printf(seq, "%04x", ntohs(pt->type));
4296 seq_printf(seq, " %-8s %pF\n",
4297 pt->dev ? pt->dev->name : "", pt->func);
4300 return 0;
4303 static const struct seq_operations ptype_seq_ops = {
4304 .start = ptype_seq_start,
4305 .next = ptype_seq_next,
4306 .stop = ptype_seq_stop,
4307 .show = ptype_seq_show,
4310 static int ptype_seq_open(struct inode *inode, struct file *file)
4312 return seq_open_net(inode, file, &ptype_seq_ops,
4313 sizeof(struct seq_net_private));
4316 static const struct file_operations ptype_seq_fops = {
4317 .owner = THIS_MODULE,
4318 .open = ptype_seq_open,
4319 .read = seq_read,
4320 .llseek = seq_lseek,
4321 .release = seq_release_net,
4325 static int __net_init dev_proc_net_init(struct net *net)
4327 int rc = -ENOMEM;
4329 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4330 goto out;
4331 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4332 goto out_dev;
4333 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4334 goto out_softnet;
4336 if (wext_proc_init(net))
4337 goto out_ptype;
4338 rc = 0;
4339 out:
4340 return rc;
4341 out_ptype:
4342 proc_net_remove(net, "ptype");
4343 out_softnet:
4344 proc_net_remove(net, "softnet_stat");
4345 out_dev:
4346 proc_net_remove(net, "dev");
4347 goto out;
4350 static void __net_exit dev_proc_net_exit(struct net *net)
4352 wext_proc_exit(net);
4354 proc_net_remove(net, "ptype");
4355 proc_net_remove(net, "softnet_stat");
4356 proc_net_remove(net, "dev");
4359 static struct pernet_operations __net_initdata dev_proc_ops = {
4360 .init = dev_proc_net_init,
4361 .exit = dev_proc_net_exit,
4364 static int __init dev_proc_init(void)
4366 return register_pernet_subsys(&dev_proc_ops);
4368 #else
4369 #define dev_proc_init() 0
4370 #endif /* CONFIG_PROC_FS */
4374 * netdev_set_master - set up master pointer
4375 * @slave: slave device
4376 * @master: new master device
4378 * Changes the master device of the slave. Pass %NULL to break the
4379 * bonding. The caller must hold the RTNL semaphore. On a failure
4380 * a negative errno code is returned. On success the reference counts
4381 * are adjusted and the function returns zero.
4383 int netdev_set_master(struct net_device *slave, struct net_device *master)
4385 struct net_device *old = slave->master;
4387 ASSERT_RTNL();
4389 if (master) {
4390 if (old)
4391 return -EBUSY;
4392 dev_hold(master);
4395 slave->master = master;
4397 if (old)
4398 dev_put(old);
4399 return 0;
4401 EXPORT_SYMBOL(netdev_set_master);
4404 * netdev_set_bond_master - set up bonding master/slave pair
4405 * @slave: slave device
4406 * @master: new master device
4408 * Changes the master device of the slave. Pass %NULL to break the
4409 * bonding. The caller must hold the RTNL semaphore. On a failure
4410 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4411 * to the routing socket and the function returns zero.
4413 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4415 int err;
4417 ASSERT_RTNL();
4419 err = netdev_set_master(slave, master);
4420 if (err)
4421 return err;
4422 if (master)
4423 slave->flags |= IFF_SLAVE;
4424 else
4425 slave->flags &= ~IFF_SLAVE;
4427 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4428 return 0;
4430 EXPORT_SYMBOL(netdev_set_bond_master);
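/*
 * Illustrative sketch (not part of this file): how a bonding-style
 * driver might pair and unpair a slave with its master using
 * netdev_set_bond_master(). The example_* names are assumptions;
 * the caller is assumed to already hold the RTNL lock, as the
 * function above requires.
 */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_bond_master(slave_dev, bond_dev);
	if (err)
		return err;
	/* ... driver-specific slave setup would follow here ... */
	return 0;
}

static void example_release(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	/* Passing NULL breaks the pairing and drops the held reference. */
	netdev_set_bond_master(slave_dev, NULL);
}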
4432 static void dev_change_rx_flags(struct net_device *dev, int flags)
4434 const struct net_device_ops *ops = dev->netdev_ops;
4436 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4437 ops->ndo_change_rx_flags(dev, flags);
4440 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4442 unsigned int old_flags = dev->flags;
4443 uid_t uid;
4444 gid_t gid;
4446 ASSERT_RTNL();
4448 dev->flags |= IFF_PROMISC;
4449 dev->promiscuity += inc;
4450 if (dev->promiscuity == 0) {
4452 * Avoid overflow.
4453 * If inc causes an overflow, leave promisc untouched and return an error.
4455 if (inc < 0)
4456 dev->flags &= ~IFF_PROMISC;
4457 else {
4458 dev->promiscuity -= inc;
4459 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4460 dev->name);
4461 return -EOVERFLOW;
4464 if (dev->flags != old_flags) {
4465 pr_info("device %s %s promiscuous mode\n",
4466 dev->name,
4467 dev->flags & IFF_PROMISC ? "entered" : "left");
4468 if (audit_enabled) {
4469 current_uid_gid(&uid, &gid);
4470 audit_log(current->audit_context, GFP_ATOMIC,
4471 AUDIT_ANOM_PROMISCUOUS,
4472 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4473 dev->name, (dev->flags & IFF_PROMISC),
4474 (old_flags & IFF_PROMISC),
4475 audit_get_loginuid(current),
4476 uid, gid,
4477 audit_get_sessionid(current));
4480 dev_change_rx_flags(dev, IFF_PROMISC);
4482 return 0;
4486 * dev_set_promiscuity - update promiscuity count on a device
4487 * @dev: device
4488 * @inc: modifier
4490 * Add or remove promiscuity from a device. While the count in the device
4491 * remains above zero the interface remains promiscuous. Once it hits zero
4492 * the device reverts back to normal filtering operation. A negative inc
4493 * value is used to drop promiscuity on the device.
4494 * Return 0 if successful or a negative errno code on error.
4496 int dev_set_promiscuity(struct net_device *dev, int inc)
4498 unsigned int old_flags = dev->flags;
4499 int err;
4501 err = __dev_set_promiscuity(dev, inc);
4502 if (err < 0)
4503 return err;
4504 if (dev->flags != old_flags)
4505 dev_set_rx_mode(dev);
4506 return err;
4508 EXPORT_SYMBOL(dev_set_promiscuity);
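/*
 * Illustrative sketch (not part of this file): a hypothetical
 * packet-tap module taking and releasing promiscuous mode with
 * dev_set_promiscuity(). RTNL must be held because the helpers
 * above assert it.
 */
static int example_tap_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* count up: enter promisc */
	rtnl_unlock();
	return err;
}

static void example_tap_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* count down: maybe leave promisc */
	rtnl_unlock();
}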
4511 * dev_set_allmulti - update allmulti count on a device
4512 * @dev: device
4513 * @inc: modifier
4515 * Add or remove reception of all multicast frames to a device. While the
4516 * count in the device remains above zero the interface remains listening
4517 * to all multicast frames. Once it hits zero the device reverts back to normal
4518 * filtering operation. A negative @inc value is used to drop the counter
4519 * when releasing a resource needing all multicasts.
4520 * Return 0 if successful or a negative errno code on error.
4523 int dev_set_allmulti(struct net_device *dev, int inc)
4525 unsigned int old_flags = dev->flags;
4527 ASSERT_RTNL();
4529 dev->flags |= IFF_ALLMULTI;
4530 dev->allmulti += inc;
4531 if (dev->allmulti == 0) {
4533 * Avoid overflow.
4534 * If inc causes an overflow, leave allmulti untouched and return an error.
4536 if (inc < 0)
4537 dev->flags &= ~IFF_ALLMULTI;
4538 else {
4539 dev->allmulti -= inc;
4540 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4541 dev->name);
4542 return -EOVERFLOW;
4545 if (dev->flags ^ old_flags) {
4546 dev_change_rx_flags(dev, IFF_ALLMULTI);
4547 dev_set_rx_mode(dev);
4549 return 0;
4551 EXPORT_SYMBOL(dev_set_allmulti);
4554 * Upload unicast and multicast address lists to device and
4555 * configure RX filtering. When the device doesn't support unicast
4556 * filtering it is put in promiscuous mode while unicast addresses
4557 * are present.
4559 void __dev_set_rx_mode(struct net_device *dev)
4561 const struct net_device_ops *ops = dev->netdev_ops;
4563 /* dev_open will call this function so the list will stay sane. */
4564 if (!(dev->flags&IFF_UP))
4565 return;
4567 if (!netif_device_present(dev))
4568 return;
4570 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4571 /* Unicast address changes may only happen under the rtnl,
4572 * therefore calling __dev_set_promiscuity here is safe.
4574 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4575 __dev_set_promiscuity(dev, 1);
4576 dev->uc_promisc = true;
4577 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4578 __dev_set_promiscuity(dev, -1);
4579 dev->uc_promisc = false;
4583 if (ops->ndo_set_rx_mode)
4584 ops->ndo_set_rx_mode(dev);
4587 void dev_set_rx_mode(struct net_device *dev)
4589 netif_addr_lock_bh(dev);
4590 __dev_set_rx_mode(dev);
4591 netif_addr_unlock_bh(dev);
4595 * dev_get_flags - get flags reported to userspace
4596 * @dev: device
4598 * Get the combination of flag bits exported through APIs to userspace.
4600 unsigned int dev_get_flags(const struct net_device *dev)
4602 unsigned int flags;
4604 flags = (dev->flags & ~(IFF_PROMISC |
4605 IFF_ALLMULTI |
4606 IFF_RUNNING |
4607 IFF_LOWER_UP |
4608 IFF_DORMANT)) |
4609 (dev->gflags & (IFF_PROMISC |
4610 IFF_ALLMULTI));
4612 if (netif_running(dev)) {
4613 if (netif_oper_up(dev))
4614 flags |= IFF_RUNNING;
4615 if (netif_carrier_ok(dev))
4616 flags |= IFF_LOWER_UP;
4617 if (netif_dormant(dev))
4618 flags |= IFF_DORMANT;
4621 return flags;
4623 EXPORT_SYMBOL(dev_get_flags);
4625 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4627 unsigned int old_flags = dev->flags;
4628 int ret;
4630 ASSERT_RTNL();
4633 * Set the flags on our device.
4636 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4637 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4638 IFF_AUTOMEDIA)) |
4639 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4640 IFF_ALLMULTI));
4643 * Load in the correct multicast list now the flags have changed.
4646 if ((old_flags ^ flags) & IFF_MULTICAST)
4647 dev_change_rx_flags(dev, IFF_MULTICAST);
4649 dev_set_rx_mode(dev);
4652 * Have we downed the interface? We handle IFF_UP ourselves
4653 * according to user attempts to set it, rather than blindly
4654 * setting it.
4657 ret = 0;
4658 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4659 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4661 if (!ret)
4662 dev_set_rx_mode(dev);
4665 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4666 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4668 dev->gflags ^= IFF_PROMISC;
4669 dev_set_promiscuity(dev, inc);
4672 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4673 is important. Some (broken) drivers set IFF_PROMISC when
4674 IFF_ALLMULTI is requested, without asking us and without reporting it.
4676 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4677 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4679 dev->gflags ^= IFF_ALLMULTI;
4680 dev_set_allmulti(dev, inc);
4683 return ret;
4686 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4688 unsigned int changes = dev->flags ^ old_flags;
4690 if (changes & IFF_UP) {
4691 if (dev->flags & IFF_UP)
4692 call_netdevice_notifiers(NETDEV_UP, dev);
4693 else
4694 call_netdevice_notifiers(NETDEV_DOWN, dev);
4697 if (dev->flags & IFF_UP &&
4698 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4699 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4703 * dev_change_flags - change device settings
4704 * @dev: device
4705 * @flags: device state flags
4707 * Change settings on device based state flags. The flags are
4708 * in the userspace exported format.
4710 int dev_change_flags(struct net_device *dev, unsigned int flags)
4712 int ret;
4713 unsigned int changes, old_flags = dev->flags;
4715 ret = __dev_change_flags(dev, flags);
4716 if (ret < 0)
4717 return ret;
4719 changes = old_flags ^ dev->flags;
4720 if (changes)
4721 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4723 __dev_notify_flags(dev, old_flags);
4724 return ret;
4726 EXPORT_SYMBOL(dev_change_flags);
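/*
 * Illustrative sketch (not part of this file): setting IFF_UP from
 * kernel code by combining dev_get_flags() and dev_change_flags(),
 * which is essentially what the SIOCSIFFLAGS path below does on
 * behalf of userspace. RTNL is required.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}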
4729 * dev_set_mtu - Change maximum transfer unit
4730 * @dev: device
4731 * @new_mtu: new transfer unit
4733 * Change the maximum transfer size of the network device.
4735 int dev_set_mtu(struct net_device *dev, int new_mtu)
4737 const struct net_device_ops *ops = dev->netdev_ops;
4738 int err;
4740 if (new_mtu == dev->mtu)
4741 return 0;
4743 /* MTU must not be negative. */
4744 if (new_mtu < 0)
4745 return -EINVAL;
4747 if (!netif_device_present(dev))
4748 return -ENODEV;
4750 err = 0;
4751 if (ops->ndo_change_mtu)
4752 err = ops->ndo_change_mtu(dev, new_mtu);
4753 else
4754 dev->mtu = new_mtu;
4756 if (!err && dev->flags & IFF_UP)
4757 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4758 return err;
4760 EXPORT_SYMBOL(dev_set_mtu);
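/*
 * Illustrative sketch (not part of this file): switching a device to
 * a jumbo MTU from kernel code. dev_set_mtu() is what the SIOCSIFMTU
 * ioctl path below ends up calling; the driver's ndo_change_mtu()
 * may still reject the value. The 9000-byte MTU is an assumption.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}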
4763 * dev_set_group - Change group this device belongs to
4764 * @dev: device
4765 * @new_group: group this device should belong to
4767 void dev_set_group(struct net_device *dev, int new_group)
4769 dev->group = new_group;
4771 EXPORT_SYMBOL(dev_set_group);
4774 * dev_set_mac_address - Change Media Access Control Address
4775 * @dev: device
4776 * @sa: new address
4778 * Change the hardware (MAC) address of the device
4780 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4782 const struct net_device_ops *ops = dev->netdev_ops;
4783 int err;
4785 if (!ops->ndo_set_mac_address)
4786 return -EOPNOTSUPP;
4787 if (sa->sa_family != dev->type)
4788 return -EINVAL;
4789 if (!netif_device_present(dev))
4790 return -ENODEV;
4791 err = ops->ndo_set_mac_address(dev, sa);
4792 if (!err)
4793 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4794 return err;
4796 EXPORT_SYMBOL(dev_set_mac_address);
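/*
 * Illustrative sketch (not part of this file): programming a new MAC
 * address through dev_set_mac_address(). The example_* name and the
 * source of @addr are assumptions; sa_family must match dev->type,
 * mirroring the check in the function above.
 */
static int example_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}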
4799 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4801 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4803 int err;
4804 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4806 if (!dev)
4807 return -ENODEV;
4809 switch (cmd) {
4810 case SIOCGIFFLAGS: /* Get interface flags */
4811 ifr->ifr_flags = (short) dev_get_flags(dev);
4812 return 0;
4814 case SIOCGIFMETRIC: /* Get the metric on the interface
4815 (currently unused) */
4816 ifr->ifr_metric = 0;
4817 return 0;
4819 case SIOCGIFMTU: /* Get the MTU of a device */
4820 ifr->ifr_mtu = dev->mtu;
4821 return 0;
4823 case SIOCGIFHWADDR:
4824 if (!dev->addr_len)
4825 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4826 else
4827 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4828 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4829 ifr->ifr_hwaddr.sa_family = dev->type;
4830 return 0;
4832 case SIOCGIFSLAVE:
4833 err = -EINVAL;
4834 break;
4836 case SIOCGIFMAP:
4837 ifr->ifr_map.mem_start = dev->mem_start;
4838 ifr->ifr_map.mem_end = dev->mem_end;
4839 ifr->ifr_map.base_addr = dev->base_addr;
4840 ifr->ifr_map.irq = dev->irq;
4841 ifr->ifr_map.dma = dev->dma;
4842 ifr->ifr_map.port = dev->if_port;
4843 return 0;
4845 case SIOCGIFINDEX:
4846 ifr->ifr_ifindex = dev->ifindex;
4847 return 0;
4849 case SIOCGIFTXQLEN:
4850 ifr->ifr_qlen = dev->tx_queue_len;
4851 return 0;
4853 default:
4854 /* dev_ioctl() should ensure this case
4855 * is never reached
4857 WARN_ON(1);
4858 err = -ENOTTY;
4859 break;
4862 return err;
4866 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4868 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4870 int err;
4871 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4872 const struct net_device_ops *ops;
4874 if (!dev)
4875 return -ENODEV;
4877 ops = dev->netdev_ops;
4879 switch (cmd) {
4880 case SIOCSIFFLAGS: /* Set interface flags */
4881 return dev_change_flags(dev, ifr->ifr_flags);
4883 case SIOCSIFMETRIC: /* Set the metric on the interface
4884 (currently unused) */
4885 return -EOPNOTSUPP;
4887 case SIOCSIFMTU: /* Set the MTU of a device */
4888 return dev_set_mtu(dev, ifr->ifr_mtu);
4890 case SIOCSIFHWADDR:
4891 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4893 case SIOCSIFHWBROADCAST:
4894 if (ifr->ifr_hwaddr.sa_family != dev->type)
4895 return -EINVAL;
4896 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4897 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4898 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4899 return 0;
4901 case SIOCSIFMAP:
4902 if (ops->ndo_set_config) {
4903 if (!netif_device_present(dev))
4904 return -ENODEV;
4905 return ops->ndo_set_config(dev, &ifr->ifr_map);
4907 return -EOPNOTSUPP;
4909 case SIOCADDMULTI:
4910 if (!ops->ndo_set_rx_mode ||
4911 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4912 return -EINVAL;
4913 if (!netif_device_present(dev))
4914 return -ENODEV;
4915 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4917 case SIOCDELMULTI:
4918 if (!ops->ndo_set_rx_mode ||
4919 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4920 return -EINVAL;
4921 if (!netif_device_present(dev))
4922 return -ENODEV;
4923 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4925 case SIOCSIFTXQLEN:
4926 if (ifr->ifr_qlen < 0)
4927 return -EINVAL;
4928 dev->tx_queue_len = ifr->ifr_qlen;
4929 return 0;
4931 case SIOCSIFNAME:
4932 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4933 return dev_change_name(dev, ifr->ifr_newname);
4935 case SIOCSHWTSTAMP:
4936 err = net_hwtstamp_validate(ifr);
4937 if (err)
4938 return err;
4939 /* fall through */
4942 * Unknown or private ioctl
4944 default:
4945 if ((cmd >= SIOCDEVPRIVATE &&
4946 cmd <= SIOCDEVPRIVATE + 15) ||
4947 cmd == SIOCBONDENSLAVE ||
4948 cmd == SIOCBONDRELEASE ||
4949 cmd == SIOCBONDSETHWADDR ||
4950 cmd == SIOCBONDSLAVEINFOQUERY ||
4951 cmd == SIOCBONDINFOQUERY ||
4952 cmd == SIOCBONDCHANGEACTIVE ||
4953 cmd == SIOCGMIIPHY ||
4954 cmd == SIOCGMIIREG ||
4955 cmd == SIOCSMIIREG ||
4956 cmd == SIOCBRADDIF ||
4957 cmd == SIOCBRDELIF ||
4958 cmd == SIOCSHWTSTAMP ||
4959 cmd == SIOCWANDEV) {
4960 err = -EOPNOTSUPP;
4961 if (ops->ndo_do_ioctl) {
4962 if (netif_device_present(dev))
4963 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4964 else
4965 err = -ENODEV;
4967 } else
4968 err = -EINVAL;
4971 return err;
4975 * This function handles all "interface"-type I/O control requests. The actual
4976 * 'doing' part of this is dev_ifsioc above.
4980 * dev_ioctl - network device ioctl
4981 * @net: the applicable net namespace
4982 * @cmd: command to issue
4983 * @arg: pointer to a struct ifreq in user space
4985 * Issue ioctl functions to devices. This is normally called by the
4986 * user space syscall interfaces but can sometimes be useful for
4987 * other purposes. The return value is the return from the syscall if
4988 * positive or a negative errno code on error.
4991 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4993 struct ifreq ifr;
4994 int ret;
4995 char *colon;
4997 /* One special case: SIOCGIFCONF takes ifconf argument
4998 and requires shared lock, because it sleeps writing
4999 to user space.
5002 if (cmd == SIOCGIFCONF) {
5003 rtnl_lock();
5004 ret = dev_ifconf(net, (char __user *) arg);
5005 rtnl_unlock();
5006 return ret;
5008 if (cmd == SIOCGIFNAME)
5009 return dev_ifname(net, (struct ifreq __user *)arg);
5011 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5012 return -EFAULT;
5014 ifr.ifr_name[IFNAMSIZ-1] = 0;
5016 colon = strchr(ifr.ifr_name, ':');
5017 if (colon)
5018 *colon = 0;
5021 * See which interface the caller is talking about.
5024 switch (cmd) {
5026 * These ioctl calls:
5027 * - can be done by all.
5028 * - atomic and do not require locking.
5029 * - return a value
5031 case SIOCGIFFLAGS:
5032 case SIOCGIFMETRIC:
5033 case SIOCGIFMTU:
5034 case SIOCGIFHWADDR:
5035 case SIOCGIFSLAVE:
5036 case SIOCGIFMAP:
5037 case SIOCGIFINDEX:
5038 case SIOCGIFTXQLEN:
5039 dev_load(net, ifr.ifr_name);
5040 rcu_read_lock();
5041 ret = dev_ifsioc_locked(net, &ifr, cmd);
5042 rcu_read_unlock();
5043 if (!ret) {
5044 if (colon)
5045 *colon = ':';
5046 if (copy_to_user(arg, &ifr,
5047 sizeof(struct ifreq)))
5048 ret = -EFAULT;
5050 return ret;
5052 case SIOCETHTOOL:
5053 dev_load(net, ifr.ifr_name);
5054 rtnl_lock();
5055 ret = dev_ethtool(net, &ifr);
5056 rtnl_unlock();
5057 if (!ret) {
5058 if (colon)
5059 *colon = ':';
5060 if (copy_to_user(arg, &ifr,
5061 sizeof(struct ifreq)))
5062 ret = -EFAULT;
5064 return ret;
5067 * These ioctl calls:
5068 * - require superuser power.
5069 * - require strict serialization.
5070 * - return a value
5072 case SIOCGMIIPHY:
5073 case SIOCGMIIREG:
5074 case SIOCSIFNAME:
5075 if (!capable(CAP_NET_ADMIN))
5076 return -EPERM;
5077 dev_load(net, ifr.ifr_name);
5078 rtnl_lock();
5079 ret = dev_ifsioc(net, &ifr, cmd);
5080 rtnl_unlock();
5081 if (!ret) {
5082 if (colon)
5083 *colon = ':';
5084 if (copy_to_user(arg, &ifr,
5085 sizeof(struct ifreq)))
5086 ret = -EFAULT;
5088 return ret;
5091 * These ioctl calls:
5092 * - require superuser power.
5093 * - require strict serialization.
5094 * - do not return a value
5096 case SIOCSIFFLAGS:
5097 case SIOCSIFMETRIC:
5098 case SIOCSIFMTU:
5099 case SIOCSIFMAP:
5100 case SIOCSIFHWADDR:
5101 case SIOCSIFSLAVE:
5102 case SIOCADDMULTI:
5103 case SIOCDELMULTI:
5104 case SIOCSIFHWBROADCAST:
5105 case SIOCSIFTXQLEN:
5106 case SIOCSMIIREG:
5107 case SIOCBONDENSLAVE:
5108 case SIOCBONDRELEASE:
5109 case SIOCBONDSETHWADDR:
5110 case SIOCBONDCHANGEACTIVE:
5111 case SIOCBRADDIF:
5112 case SIOCBRDELIF:
5113 case SIOCSHWTSTAMP:
5114 if (!capable(CAP_NET_ADMIN))
5115 return -EPERM;
5116 /* fall through */
5117 case SIOCBONDSLAVEINFOQUERY:
5118 case SIOCBONDINFOQUERY:
5119 dev_load(net, ifr.ifr_name);
5120 rtnl_lock();
5121 ret = dev_ifsioc(net, &ifr, cmd);
5122 rtnl_unlock();
5123 return ret;
5125 case SIOCGIFMEM:
5126 /* Get the per device memory space. We can add this but
5127 * currently do not support it */
5128 case SIOCSIFMEM:
5129 /* Set the per device memory buffer space.
5130 * Not applicable in our case */
5131 case SIOCSIFLINK:
5132 return -ENOTTY;
5135 * Unknown or private ioctl.
5137 default:
5138 if (cmd == SIOCWANDEV ||
5139 (cmd >= SIOCDEVPRIVATE &&
5140 cmd <= SIOCDEVPRIVATE + 15)) {
5141 dev_load(net, ifr.ifr_name);
5142 rtnl_lock();
5143 ret = dev_ifsioc(net, &ifr, cmd);
5144 rtnl_unlock();
5145 if (!ret && copy_to_user(arg, &ifr,
5146 sizeof(struct ifreq)))
5147 ret = -EFAULT;
5148 return ret;
5150 /* Take care of Wireless Extensions */
5151 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5152 return wext_handle_ioctl(net, &ifr, cmd, arg);
5153 return -ENOTTY;
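/*
 * Illustrative userspace sketch (not part of this file): exercising
 * the SIOCGIFMTU path handled by dev_ifsioc_locked() above from an
 * ordinary AF_INET socket. The interface name "eth0" is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}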
5159 * dev_new_index - allocate an ifindex
5160 * @net: the applicable net namespace
5162 * Returns a suitable unique value for a new device interface
5163 * number. The caller must hold the rtnl semaphore or the
5164 * dev_base_lock to be sure it remains unique.
5166 static int dev_new_index(struct net *net)
5168 static int ifindex;
5169 for (;;) {
5170 if (++ifindex <= 0)
5171 ifindex = 1;
5172 if (!__dev_get_by_index(net, ifindex))
5173 return ifindex;
5177 /* Delayed registration/unregistration */
5178 static LIST_HEAD(net_todo_list);
5180 static void net_set_todo(struct net_device *dev)
5182 list_add_tail(&dev->todo_list, &net_todo_list);
5185 static void rollback_registered_many(struct list_head *head)
5187 struct net_device *dev, *tmp;
5189 BUG_ON(dev_boot_phase);
5190 ASSERT_RTNL();
5192 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5193 /* Some devices call without registering
5194 * for initialization unwind. Remove those
5195 * devices and proceed with the remaining.
5197 if (dev->reg_state == NETREG_UNINITIALIZED) {
5198 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5199 dev->name, dev);
5201 WARN_ON(1);
5202 list_del(&dev->unreg_list);
5203 continue;
5205 dev->dismantle = true;
5206 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5209 /* If device is running, close it first. */
5210 dev_close_many(head);
5212 list_for_each_entry(dev, head, unreg_list) {
5213 /* And unlink it from device chain. */
5214 unlist_netdevice(dev);
5216 dev->reg_state = NETREG_UNREGISTERING;
5219 synchronize_net();
5221 list_for_each_entry(dev, head, unreg_list) {
5222 /* Shutdown queueing discipline. */
5223 dev_shutdown(dev);
5226 /* Notify protocols, that we are about to destroy
5227 this device. They should clean all the things.
5229 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5231 if (!dev->rtnl_link_ops ||
5232 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5233 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5236 * Flush the unicast and multicast chains
5238 dev_uc_flush(dev);
5239 dev_mc_flush(dev);
5241 if (dev->netdev_ops->ndo_uninit)
5242 dev->netdev_ops->ndo_uninit(dev);
5244 /* Notifier chain MUST detach us from master device. */
5245 WARN_ON(dev->master);
5247 /* Remove entries from kobject tree */
5248 netdev_unregister_kobject(dev);
5251 /* Process any work delayed until the end of the batch */
5252 dev = list_first_entry(head, struct net_device, unreg_list);
5253 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5255 synchronize_net();
5257 list_for_each_entry(dev, head, unreg_list)
5258 dev_put(dev);
5261 static void rollback_registered(struct net_device *dev)
5263 LIST_HEAD(single);
5265 list_add(&dev->unreg_list, &single);
5266 rollback_registered_many(&single);
5267 list_del(&single);
5270 static netdev_features_t netdev_fix_features(struct net_device *dev,
5271 netdev_features_t features)
5273 /* Fix illegal checksum combinations */
5274 if ((features & NETIF_F_HW_CSUM) &&
5275 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5276 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5277 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5280 /* Fix illegal SG+CSUM combinations. */
5281 if ((features & NETIF_F_SG) &&
5282 !(features & NETIF_F_ALL_CSUM)) {
5283 netdev_dbg(dev,
5284 "Dropping NETIF_F_SG since no checksum feature.\n");
5285 features &= ~NETIF_F_SG;
5288 /* TSO requires that SG is present as well. */
5289 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5290 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5291 features &= ~NETIF_F_ALL_TSO;
5294 /* TSO ECN requires that TSO is present as well. */
5295 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5296 features &= ~NETIF_F_TSO_ECN;
5298 /* Software GSO depends on SG. */
5299 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5300 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5301 features &= ~NETIF_F_GSO;
5304 /* UFO needs SG and checksumming */
5305 if (features & NETIF_F_UFO) {
5306 /* maybe split UFO into V4 and V6? */
5307 if (!((features & NETIF_F_GEN_CSUM) ||
5308 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5309 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5310 netdev_dbg(dev,
5311 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5312 features &= ~NETIF_F_UFO;
5315 if (!(features & NETIF_F_SG)) {
5316 netdev_dbg(dev,
5317 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5318 features &= ~NETIF_F_UFO;
5322 return features;
5325 int __netdev_update_features(struct net_device *dev)
5327 netdev_features_t features;
5328 int err = 0;
5330 ASSERT_RTNL();
5332 features = netdev_get_wanted_features(dev);
5334 if (dev->netdev_ops->ndo_fix_features)
5335 features = dev->netdev_ops->ndo_fix_features(dev, features);
5337 /* driver might be less strict about feature dependencies */
5338 features = netdev_fix_features(dev, features);
5340 if (dev->features == features)
5341 return 0;
5343 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5344 &dev->features, &features);
5346 if (dev->netdev_ops->ndo_set_features)
5347 err = dev->netdev_ops->ndo_set_features(dev, features);
5349 if (unlikely(err < 0)) {
5350 netdev_err(dev,
5351 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5352 err, &features, &dev->features);
5353 return -1;
5356 if (!err)
5357 dev->features = features;
5359 return 1;
5363 * netdev_update_features - recalculate device features
5364 * @dev: the device to check
5366 * Recalculate dev->features set and send notifications if it
5367 * has changed. Should be called after driver or hardware dependent
5368 * conditions might have changed that influence the features.
5370 void netdev_update_features(struct net_device *dev)
5372 if (__netdev_update_features(dev))
5373 netdev_features_change(dev);
5375 EXPORT_SYMBOL(netdev_update_features);
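/*
 * Illustrative sketch (not part of this file): the usual pattern for
 * runtime-dependent features. A hypothetical driver masks bits it can
 * no longer honour in its ndo_fix_features() callback and calls
 * netdev_update_features() whenever the relevant condition changes
 * (here, the MTU). Both callbacks run under RTNL.
 */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Assumption: the hardware cannot do LRO with jumbo frames. */
	if (dev->mtu > 1500)
		features &= ~NETIF_F_LRO;
	return features;
}

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* re-runs example_fix_features() */
	return 0;
}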
5378 * netdev_change_features - recalculate device features
5379 * @dev: the device to check
5381 * Recalculate dev->features set and send notifications even
5382 * if they have not changed. Should be called instead of
5383 * netdev_update_features() if also dev->vlan_features might
5384 * have changed to allow the changes to be propagated to stacked
5385 * VLAN devices.
5387 void netdev_change_features(struct net_device *dev)
5389 __netdev_update_features(dev);
5390 netdev_features_change(dev);
5392 EXPORT_SYMBOL(netdev_change_features);
5395 * netif_stacked_transfer_operstate - transfer operstate
5396 * @rootdev: the root or lower level device to transfer state from
5397 * @dev: the device to transfer operstate to
5399 * Transfer operational state from root to device. This is normally
5400 * called when a stacking relationship exists between the root
5401 * device and the device (a leaf device).
5403 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5404 struct net_device *dev)
5406 if (rootdev->operstate == IF_OPER_DORMANT)
5407 netif_dormant_on(dev);
5408 else
5409 netif_dormant_off(dev);
5411 if (netif_carrier_ok(rootdev)) {
5412 if (!netif_carrier_ok(dev))
5413 netif_carrier_on(dev);
5414 } else {
5415 if (netif_carrier_ok(dev))
5416 netif_carrier_off(dev);
5419 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5421 #ifdef CONFIG_RPS
5422 static int netif_alloc_rx_queues(struct net_device *dev)
5424 unsigned int i, count = dev->num_rx_queues;
5425 struct netdev_rx_queue *rx;
5427 BUG_ON(count < 1);
5429 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5430 if (!rx) {
5431 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5432 return -ENOMEM;
5434 dev->_rx = rx;
5436 for (i = 0; i < count; i++)
5437 rx[i].dev = dev;
5438 return 0;
5440 #endif
5442 static void netdev_init_one_queue(struct net_device *dev,
5443 struct netdev_queue *queue, void *_unused)
5445 /* Initialize queue lock */
5446 spin_lock_init(&queue->_xmit_lock);
5447 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5448 queue->xmit_lock_owner = -1;
5449 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5450 queue->dev = dev;
5451 #ifdef CONFIG_BQL
5452 dql_init(&queue->dql, HZ);
5453 #endif
5456 static int netif_alloc_netdev_queues(struct net_device *dev)
5458 unsigned int count = dev->num_tx_queues;
5459 struct netdev_queue *tx;
5461 BUG_ON(count < 1);
5463 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5464 if (!tx) {
5465 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5466 return -ENOMEM;
5468 dev->_tx = tx;
5470 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5471 spin_lock_init(&dev->tx_global_lock);
5473 return 0;
5477 * register_netdevice - register a network device
5478 * @dev: device to register
5480 * Take a completed network device structure and add it to the kernel
5481 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5482 * chain. 0 is returned on success. A negative errno code is returned
5483 * on a failure to set up the device, or if the name is a duplicate.
5485 * Callers must hold the rtnl semaphore. You may want
5486 * register_netdev() instead of this.
5488 * BUGS:
5489 * The locking appears insufficient to guarantee two parallel registers
5490 * will not get the same name.
5493 int register_netdevice(struct net_device *dev)
5495 int ret;
5496 struct net *net = dev_net(dev);
5498 BUG_ON(dev_boot_phase);
5499 ASSERT_RTNL();
5501 might_sleep();
5503 /* When net_devices are persistent, this will be fatal. */
5504 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5505 BUG_ON(!net);
5507 spin_lock_init(&dev->addr_list_lock);
5508 netdev_set_addr_lockdep_class(dev);
5510 dev->iflink = -1;
5512 ret = dev_get_valid_name(dev, dev->name);
5513 if (ret < 0)
5514 goto out;
5516 /* Init, if this function is available */
5517 if (dev->netdev_ops->ndo_init) {
5518 ret = dev->netdev_ops->ndo_init(dev);
5519 if (ret) {
5520 if (ret > 0)
5521 ret = -EIO;
5522 goto out;
5526 dev->ifindex = dev_new_index(net);
5527 if (dev->iflink == -1)
5528 dev->iflink = dev->ifindex;
5530 /* Transfer changeable features to wanted_features and enable
5531 * software offloads (GSO and GRO).
5533 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5534 dev->features |= NETIF_F_SOFT_FEATURES;
5535 dev->wanted_features = dev->features & dev->hw_features;
5537 /* Turn on no cache copy if HW is doing checksum */
5538 if (!(dev->flags & IFF_LOOPBACK)) {
5539 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5540 if (dev->features & NETIF_F_ALL_CSUM) {
5541 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5542 dev->features |= NETIF_F_NOCACHE_COPY;
5546 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5548 dev->vlan_features |= NETIF_F_HIGHDMA;
5550 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5551 ret = notifier_to_errno(ret);
5552 if (ret)
5553 goto err_uninit;
5555 ret = netdev_register_kobject(dev);
5556 if (ret)
5557 goto err_uninit;
5558 dev->reg_state = NETREG_REGISTERED;
5560 __netdev_update_features(dev);
5563 * Default initial state at registration is that the
5564 * device is present.
5567 set_bit(__LINK_STATE_PRESENT, &dev->state);
5569 dev_init_scheduler(dev);
5570 dev_hold(dev);
5571 list_netdevice(dev);
5573 /* Notify protocols, that a new device appeared. */
5574 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5575 ret = notifier_to_errno(ret);
5576 if (ret) {
5577 rollback_registered(dev);
5578 dev->reg_state = NETREG_UNREGISTERED;
5581 * Prevent userspace races by waiting until the network
5582 * device is fully setup before sending notifications.
5584 if (!dev->rtnl_link_ops ||
5585 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5586 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5588 out:
5589 return ret;
5591 err_uninit:
5592 if (dev->netdev_ops->ndo_uninit)
5593 dev->netdev_ops->ndo_uninit(dev);
5594 goto out;
5596 EXPORT_SYMBOL(register_netdevice);
5599 * init_dummy_netdev - init a dummy network device for NAPI
5600 * @dev: device to init
5602 * This takes a network device structure and initializes the minimum
5603 * number of fields so it can be used to schedule NAPI polls without
5604 * registering a full blown interface. This is to be used by drivers
5605 * that need to tie several hardware interfaces to a single NAPI
5606 * poll scheduler due to HW limitations.
5608 int init_dummy_netdev(struct net_device *dev)
5610 /* Clear everything. Note we don't initialize spinlocks
5611 * as they aren't supposed to be taken by any of the
5612 * NAPI code and this dummy netdev is supposed to be
5613 * only ever used for NAPI polls
5615 memset(dev, 0, sizeof(struct net_device));
5617 /* make sure we BUG if trying to hit standard
5618 * register/unregister code path
5620 dev->reg_state = NETREG_DUMMY;
5622 /* NAPI wants this */
5623 INIT_LIST_HEAD(&dev->napi_list);
5625 /* a dummy interface is started by default */
5626 set_bit(__LINK_STATE_PRESENT, &dev->state);
5627 set_bit(__LINK_STATE_START, &dev->state);
5629 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5630 * because users of this 'device' don't need to change
5631 * its refcount.
5634 return 0;
5636 EXPORT_SYMBOL_GPL(init_dummy_netdev);
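/*
 * Illustrative sketch (not part of this file): a driver that owns
 * several hardware ports behind a single interrupt can hang its NAPI
 * context off a dummy netdev. The example_* names, poll body and
 * NAPI weight of 64 are assumptions.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to @budget packets here ... */
	napi_complete(napi);
	return 0;
}

static void example_hw_init(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
	napi_enable(&hw->napi);
}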
5640 * register_netdev - register a network device
5641 * @dev: device to register
5643 * Take a completed network device structure and add it to the kernel
5644 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5645 * chain. 0 is returned on success. A negative errno code is returned
5646 * on a failure to set up the device, or if the name is a duplicate.
5648 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5649 * and expands the device name if you passed a format string to
5650 * alloc_netdev.
5652 int register_netdev(struct net_device *dev)
5654 int err;
5656 rtnl_lock();
5657 err = register_netdevice(dev);
5658 rtnl_unlock();
5659 return err;
5661 EXPORT_SYMBOL(register_netdev);
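/*
 * Illustrative sketch (not part of this file): the classic
 * allocate/register/unregister/free life cycle as seen from a
 * hypothetical probe and remove pair. example_priv and
 * example_netdev_ops are assumptions standing in for real driver code.
 */
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);	/* takes RTNL internally */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes RTNL internally */
	free_netdev(dev);
}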
5663 int netdev_refcnt_read(const struct net_device *dev)
5665 int i, refcnt = 0;
5667 for_each_possible_cpu(i)
5668 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5669 return refcnt;
5671 EXPORT_SYMBOL(netdev_refcnt_read);
5674 * netdev_wait_allrefs - wait until all references are gone.
5676 * This is called when unregistering network devices.
5678 * Any protocol or device that holds a reference should register
5679 * for netdevice notification, and cleanup and put back the
5680 * reference if they receive an UNREGISTER event.
5681 * We can get stuck here if buggy protocols don't correctly
5682 * call dev_put.
5684 static void netdev_wait_allrefs(struct net_device *dev)
5686 unsigned long rebroadcast_time, warning_time;
5687 int refcnt;
5689 linkwatch_forget_dev(dev);
5691 rebroadcast_time = warning_time = jiffies;
5692 refcnt = netdev_refcnt_read(dev);
5694 while (refcnt != 0) {
5695 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5696 rtnl_lock();
5698 /* Rebroadcast unregister notification */
5699 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5700 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5701 * should have already handled it the first time */
5703 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5704 &dev->state)) {
5705 /* We must not have linkwatch events
5706 * pending on unregister. If this
5707 * happens, we simply run the queue
5708 * unscheduled, resulting in a noop
5709 * for this device.
5711 linkwatch_run_queue();
5714 __rtnl_unlock();
5716 rebroadcast_time = jiffies;
5719 msleep(250);
5721 refcnt = netdev_refcnt_read(dev);
5723 if (time_after(jiffies, warning_time + 10 * HZ)) {
5724 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5725 dev->name, refcnt);
5726 warning_time = jiffies;
5731 /* The sequence is:
5733 * rtnl_lock();
5734 * ...
5735 * register_netdevice(x1);
5736 * register_netdevice(x2);
5737 * ...
5738 * unregister_netdevice(y1);
5739 * unregister_netdevice(y2);
5740 * ...
5741 * rtnl_unlock();
5742 * free_netdev(y1);
5743 * free_netdev(y2);
5745 * We are invoked by rtnl_unlock().
5746 * This allows us to deal with problems:
5747 * 1) We can delete sysfs objects which invoke hotplug
5748 * without deadlocking with linkwatch via keventd.
5749 * 2) Since we run with the RTNL semaphore not held, we can sleep
5750 * safely in order to wait for the netdev refcnt to drop to zero.
5752 * We must not return until all unregister events added during
5753 * the interval the lock was held have been completed.
5755 void netdev_run_todo(void)
5757 struct list_head list;
5759 /* Snapshot list, allow later requests */
5760 list_replace_init(&net_todo_list, &list);
5762 __rtnl_unlock();
5764 /* Wait for rcu callbacks to finish before attempting to drain
5765 * the device list. This usually avoids a 250ms wait.
5767 if (!list_empty(&list))
5768 rcu_barrier();
5770 while (!list_empty(&list)) {
5771 struct net_device *dev
5772 = list_first_entry(&list, struct net_device, todo_list);
5773 list_del(&dev->todo_list);
5775 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5776 pr_err("network todo '%s' but state %d\n",
5777 dev->name, dev->reg_state);
5778 dump_stack();
5779 continue;
5782 dev->reg_state = NETREG_UNREGISTERED;
5784 on_each_cpu(flush_backlog, dev, 1);
5786 netdev_wait_allrefs(dev);
5788 /* paranoia */
5789 BUG_ON(netdev_refcnt_read(dev));
5790 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5791 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5792 WARN_ON(dev->dn_ptr);
5794 if (dev->destructor)
5795 dev->destructor(dev);
5797 /* Free network device */
5798 kobject_put(&dev->dev.kobj);
5802 /* Convert net_device_stats to rtnl_link_stats64. They have the same
5803 * fields in the same order, with only the type differing.
5805 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5806 const struct net_device_stats *netdev_stats)
5808 #if BITS_PER_LONG == 64
5809 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5810 memcpy(stats64, netdev_stats, sizeof(*stats64));
5811 #else
5812 size_t i, n = sizeof(*stats64) / sizeof(u64);
5813 const unsigned long *src = (const unsigned long *)netdev_stats;
5814 u64 *dst = (u64 *)stats64;
5816 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5817 sizeof(*stats64) / sizeof(u64));
5818 for (i = 0; i < n; i++)
5819 dst[i] = src[i];
5820 #endif
5822 EXPORT_SYMBOL(netdev_stats_to_stats64);
5825 * dev_get_stats - get network device statistics
5826 * @dev: device to get statistics from
5827 * @storage: place to store stats
5829 * Get network statistics from device. Return @storage.
5830 * The device driver may provide its own method by setting
5831 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5832 * otherwise the internal statistics structure is used.
5834 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5835 struct rtnl_link_stats64 *storage)
5837 const struct net_device_ops *ops = dev->netdev_ops;
5839 if (ops->ndo_get_stats64) {
5840 memset(storage, 0, sizeof(*storage));
5841 ops->ndo_get_stats64(dev, storage);
5842 } else if (ops->ndo_get_stats) {
5843 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5844 } else {
5845 netdev_stats_to_stats64(storage, &dev->stats);
5847 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5848 return storage;
5850 EXPORT_SYMBOL(dev_get_stats);
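/*
 * Illustrative sketch (not part of this file): a driver-side
 * ndo_get_stats64() callback feeding dev_get_stats() above. The
 * example_priv layout and its per-queue counters are assumptions;
 * real drivers typically also use u64_stats_sync when counters are
 * updated from the datapath.
 */
static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		storage->tx_packets += priv->txq[i].packets;
		storage->tx_bytes   += priv->txq[i].bytes;
	}
	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	return storage;	/* dev_get_stats() zeroed @storage for us */
}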
5852 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5854 struct netdev_queue *queue = dev_ingress_queue(dev);
5856 #ifdef CONFIG_NET_CLS_ACT
5857 if (queue)
5858 return queue;
5859 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5860 if (!queue)
5861 return NULL;
5862 netdev_init_one_queue(dev, queue, NULL);
5863 queue->qdisc = &noop_qdisc;
5864 queue->qdisc_sleeping = &noop_qdisc;
5865 rcu_assign_pointer(dev->ingress_queue, queue);
5866 #endif
5867 return queue;
5871 * alloc_netdev_mqs - allocate network device
5872 * @sizeof_priv: size of private data to allocate space for
5873 * @name: device name format string
5874 * @setup: callback to initialize device
5875 * @txqs: the number of TX subqueues to allocate
5876 * @rxqs: the number of RX subqueues to allocate
5878 * Allocates a struct net_device with private data area for driver use
5879 * and performs basic initialization. Also allocates subqueue structs
5880 * for each queue on the device.
5882 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5883 void (*setup)(struct net_device *),
5884 unsigned int txqs, unsigned int rxqs)
5886 struct net_device *dev;
5887 size_t alloc_size;
5888 struct net_device *p;
5890 BUG_ON(strlen(name) >= sizeof(dev->name));
5892 if (txqs < 1) {
5893 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5894 return NULL;
5897 #ifdef CONFIG_RPS
5898 if (rxqs < 1) {
5899 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5900 return NULL;
5902 #endif
5904 alloc_size = sizeof(struct net_device);
5905 if (sizeof_priv) {
5906 /* ensure 32-byte alignment of private area */
5907 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5908 alloc_size += sizeof_priv;
5910 /* ensure 32-byte alignment of whole construct */
5911 alloc_size += NETDEV_ALIGN - 1;
5913 p = kzalloc(alloc_size, GFP_KERNEL);
5914 if (!p) {
5915 pr_err("alloc_netdev: Unable to allocate device\n");
5916 return NULL;
5919 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5920 dev->padded = (char *)dev - (char *)p;
5922 dev->pcpu_refcnt = alloc_percpu(int);
5923 if (!dev->pcpu_refcnt)
5924 goto free_p;
5926 if (dev_addr_init(dev))
5927 goto free_pcpu;
5929 dev_mc_init(dev);
5930 dev_uc_init(dev);
5932 dev_net_set(dev, &init_net);
5934 dev->gso_max_size = GSO_MAX_SIZE;
5936 INIT_LIST_HEAD(&dev->napi_list);
5937 INIT_LIST_HEAD(&dev->unreg_list);
5938 INIT_LIST_HEAD(&dev->link_watch_list);
5939 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5940 setup(dev);
5942 dev->num_tx_queues = txqs;
5943 dev->real_num_tx_queues = txqs;
5944 if (netif_alloc_netdev_queues(dev))
5945 goto free_all;
5947 #ifdef CONFIG_RPS
5948 dev->num_rx_queues = rxqs;
5949 dev->real_num_rx_queues = rxqs;
5950 if (netif_alloc_rx_queues(dev))
5951 goto free_all;
5952 #endif
5954 strcpy(dev->name, name);
5955 dev->group = INIT_NETDEV_GROUP;
5956 return dev;
5958 free_all:
5959 free_netdev(dev);
5960 return NULL;
5962 free_pcpu:
5963 free_percpu(dev->pcpu_refcnt);
5964 kfree(dev->_tx);
5965 #ifdef CONFIG_RPS
5966 kfree(dev->_rx);
5967 #endif
5969 free_p:
5970 kfree(p);
5971 return NULL;
5973 EXPORT_SYMBOL(alloc_netdev_mqs);
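/*
 * Illustrative sketch (not part of this file): allocating an
 * Ethernet-style device with eight TX and eight RX queues. The name
 * template, example_priv and the queue counts are assumptions; the
 * "%d" in the name is expanded later when the device is registered.
 */
static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
				ether_setup, 8, 8);
}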
5976 * free_netdev - free network device
5977 * @dev: device
5979 * This function does the last stage of destroying an allocated device
5980 * interface. The reference to the device object is released.
5981 * If this is the last reference then it will be freed.
5983 void free_netdev(struct net_device *dev)
5985 struct napi_struct *p, *n;
5987 release_net(dev_net(dev));
5989 kfree(dev->_tx);
5990 #ifdef CONFIG_RPS
5991 kfree(dev->_rx);
5992 #endif
5994 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5996 /* Flush device addresses */
5997 dev_addr_flush(dev);
5999 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6000 netif_napi_del(p);
6002 free_percpu(dev->pcpu_refcnt);
6003 dev->pcpu_refcnt = NULL;
6005 /* Compatibility with error handling in drivers */
6006 if (dev->reg_state == NETREG_UNINITIALIZED) {
6007 kfree((char *)dev - dev->padded);
6008 return;
6011 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6012 dev->reg_state = NETREG_RELEASED;
6014 /* will free via device release */
6015 put_device(&dev->dev);
6017 EXPORT_SYMBOL(free_netdev);
6020 * synchronize_net - Synchronize with packet receive processing
6022 * Wait for packets currently being received to be done.
6023 * Does not block later packets from starting.
6025 void synchronize_net(void)
6027 might_sleep();
6028 if (rtnl_is_locked())
6029 synchronize_rcu_expedited();
6030 else
6031 synchronize_rcu();
6033 EXPORT_SYMBOL(synchronize_net);
6036 * unregister_netdevice_queue - remove device from the kernel
6037 * @dev: device
6038 * @head: list
6040 * This function shuts down a device interface and removes it
6041 * from the kernel tables.
6042 * If head is not NULL, the device is queued to be unregistered later.
6044 * Callers must hold the rtnl semaphore. You may want
6045 * unregister_netdev() instead of this.
6048 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6050 ASSERT_RTNL();
6052 if (head) {
6053 list_move_tail(&dev->unreg_list, head);
6054 } else {
6055 rollback_registered(dev);
6056 /* Finish processing unregister after unlock */
6057 net_set_todo(dev);
6060 EXPORT_SYMBOL(unregister_netdevice_queue);
6063 * unregister_netdevice_many - unregister many devices
6064 * @head: list of devices
6066 void unregister_netdevice_many(struct list_head *head)
6068 struct net_device *dev;
6070 if (!list_empty(head)) {
6071 rollback_registered_many(head);
6072 list_for_each_entry(dev, head, unreg_list)
6073 net_set_todo(dev);
6076 EXPORT_SYMBOL(unregister_netdevice_many);
6079 * unregister_netdev - remove device from the kernel
6080 * @dev: device
6082 * This function shuts down a device interface and removes it
6083 * from the kernel tables.
6085 * This is just a wrapper for unregister_netdevice that takes
6086 * the rtnl semaphore. In general you want to use this and not
6087 * unregister_netdevice.
6089 void unregister_netdev(struct net_device *dev)
6091 rtnl_lock();
6092 unregister_netdevice(dev);
6093 rtnl_unlock();
6095 EXPORT_SYMBOL(unregister_netdev);
6098 * dev_change_net_namespace - move device to a different network namespace
6099 * @dev: device
6100 * @net: network namespace
6101 * @pat: If not NULL name pattern to try if the current device name
6102 * is already taken in the destination network namespace.
6104 * This function shuts down a device interface and moves it
6105 * to a new network namespace. On success 0 is returned, on
6106 * a failure a negative errno code is returned.
6108 * Callers must hold the rtnl semaphore.
6111 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6113 int err;
6115 ASSERT_RTNL();
6117 /* Don't allow namespace local devices to be moved. */
6118 err = -EINVAL;
6119 if (dev->features & NETIF_F_NETNS_LOCAL)
6120 goto out;
6122 /* Ensure the device has been registered */
6123 err = -EINVAL;
6124 if (dev->reg_state != NETREG_REGISTERED)
6125 goto out;
6127 /* Get out if there is nothing to do */
6128 err = 0;
6129 if (net_eq(dev_net(dev), net))
6130 goto out;
6132 /* Pick the destination device name, and ensure
6133 * we can use it in the destination network namespace.
6135 err = -EEXIST;
6136 if (__dev_get_by_name(net, dev->name)) {
6137 /* We get here if we can't use the current device name */
6138 if (!pat)
6139 goto out;
6140 if (dev_get_valid_name(dev, pat) < 0)
6141 goto out;
6145 * And now a mini version of register_netdevice and unregister_netdevice.
6148 /* If device is running close it first. */
6149 dev_close(dev);
6151 /* And unlink it from device chain */
6152 err = -ENODEV;
6153 unlist_netdevice(dev);
6155 synchronize_net();
6157 /* Shutdown queueing discipline. */
6158 dev_shutdown(dev);
6160 /* Notify protocols, that we are about to destroy
6161 this device. They should clean all the things.
6163 Note that dev->reg_state stays at NETREG_REGISTERED.
6164 This is wanted so that 8021q and macvlan know
6165 the device is just moving and can keep their slaves up.
6167 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6168 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6169 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6172 * Flush the unicast and multicast chains
6174 dev_uc_flush(dev);
6175 dev_mc_flush(dev);
6177 /* Actually switch the network namespace */
6178 dev_net_set(dev, net);
6180 /* If there is an ifindex conflict assign a new one */
6181 if (__dev_get_by_index(net, dev->ifindex)) {
6182 int iflink = (dev->iflink == dev->ifindex);
6183 dev->ifindex = dev_new_index(net);
6184 if (iflink)
6185 dev->iflink = dev->ifindex;
6188 /* Fixup kobjects */
6189 err = device_rename(&dev->dev, dev->name);
6190 WARN_ON(err);
6192 /* Add the device back in the hashes */
6193 list_netdevice(dev);
6195 /* Notify protocols, that a new device appeared. */
6196 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6199 * Prevent userspace races by waiting until the network
6200 * device is fully setup before sending notifications.
6202 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6204 synchronize_net();
6205 err = 0;
6206 out:
6207 return err;
6209 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
6211 static int dev_cpu_callback(struct notifier_block *nfb,
6212 unsigned long action,
6213 void *ocpu)
6215 struct sk_buff **list_skb;
6216 struct sk_buff *skb;
6217 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6218 struct softnet_data *sd, *oldsd;
6220 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6221 return NOTIFY_OK;
6223 local_irq_disable();
6224 cpu = smp_processor_id();
6225 sd = &per_cpu(softnet_data, cpu);
6226 oldsd = &per_cpu(softnet_data, oldcpu);
6228 /* Find end of our completion_queue. */
6229 list_skb = &sd->completion_queue;
6230 while (*list_skb)
6231 list_skb = &(*list_skb)->next;
6232 /* Append completion queue from offline CPU. */
6233 *list_skb = oldsd->completion_queue;
6234 oldsd->completion_queue = NULL;
6236 /* Append output queue from offline CPU. */
6237 if (oldsd->output_queue) {
6238 *sd->output_queue_tailp = oldsd->output_queue;
6239 sd->output_queue_tailp = oldsd->output_queue_tailp;
6240 oldsd->output_queue = NULL;
6241 oldsd->output_queue_tailp = &oldsd->output_queue;
6243 /* Append NAPI poll list from offline CPU. */
6244 if (!list_empty(&oldsd->poll_list)) {
6245 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6246 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6249 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6250 local_irq_enable();
6252 /* Process offline CPU's input_pkt_queue */
6253 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6254 netif_rx(skb);
6255 input_queue_head_incr(oldsd);
6257 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6258 netif_rx(skb);
6259 input_queue_head_incr(oldsd);
6262 return NOTIFY_OK;
6267 * netdev_increment_features - increment feature set by one
6268 * @all: current feature set
6269 * @one: new feature set
6270 * @mask: mask feature set
6272 * Computes a new feature set after adding a device with feature set
6273 * @one to the master device with current feature set @all. Will not
6274 * enable anything that is off in @mask. Returns the new feature set.
6276 netdev_features_t netdev_increment_features(netdev_features_t all,
6277 netdev_features_t one, netdev_features_t mask)
6279 if (mask & NETIF_F_GEN_CSUM)
6280 mask |= NETIF_F_ALL_CSUM;
6281 mask |= NETIF_F_VLAN_CHALLENGED;
6283 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6284 all &= one | ~NETIF_F_ALL_FOR_ALL;
6286 /* If one device supports hw checksumming, set for all. */
6287 if (all & NETIF_F_GEN_CSUM)
6288 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6290 return all;
6292 EXPORT_SYMBOL(netdev_increment_features);
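/*
 * Illustrative sketch (not part of this file): how a master device
 * (bridge/bonding style) might recompute its feature set from two
 * slaves with netdev_increment_features(). The starting value and the
 * use of the master's own features as the mask are assumptions drawn
 * from that pattern; the caller is assumed to hold RTNL.
 */
static void example_compute_features(struct net_device *master,
				     struct net_device *slave_a,
				     struct net_device *slave_b)
{
	netdev_features_t mask = master->features;
	netdev_features_t all = mask & ~NETIF_F_ONE_FOR_ALL;

	all = netdev_increment_features(all, slave_a->features, mask);
	all = netdev_increment_features(all, slave_b->features, mask);

	master->features = all;
	netdev_change_features(master);	/* notify even if unchanged */
}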
6294 static struct hlist_head *netdev_create_hash(void)
6296 int i;
6297 struct hlist_head *hash;
6299 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6300 if (hash != NULL)
6301 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6302 INIT_HLIST_HEAD(&hash[i]);
6304 return hash;
6307 /* Initialize per network namespace state */
6308 static int __net_init netdev_init(struct net *net)
6310 INIT_LIST_HEAD(&net->dev_base_head);
6312 net->dev_name_head = netdev_create_hash();
6313 if (net->dev_name_head == NULL)
6314 goto err_name;
6316 net->dev_index_head = netdev_create_hash();
6317 if (net->dev_index_head == NULL)
6318 goto err_idx;
6320 return 0;
6322 err_idx:
6323 kfree(net->dev_name_head);
6324 err_name:
6325 return -ENOMEM;
6329 * netdev_drivername - network driver for the device
6330 * @dev: network device
6332 * Determine network driver for device.
6334 const char *netdev_drivername(const struct net_device *dev)
6336 const struct device_driver *driver;
6337 const struct device *parent;
6338 const char *empty = "";
6340 parent = dev->dev.parent;
6341 if (!parent)
6342 return empty;
6344 driver = parent->driver;
6345 if (driver && driver->name)
6346 return driver->name;
6347 return empty;
6350 int __netdev_printk(const char *level, const struct net_device *dev,
6351 struct va_format *vaf)
6353 int r;
6355 if (dev && dev->dev.parent)
6356 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6357 netdev_name(dev), vaf);
6358 else if (dev)
6359 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6360 else
6361 r = printk("%s(NULL net_device): %pV", level, vaf);
6363 return r;
6365 EXPORT_SYMBOL(__netdev_printk);
6367 int netdev_printk(const char *level, const struct net_device *dev,
6368 const char *format, ...)
6370 struct va_format vaf;
6371 va_list args;
6372 int r;
6374 va_start(args, format);
6376 vaf.fmt = format;
6377 vaf.va = &args;
6379 r = __netdev_printk(level, dev, &vaf);
6380 va_end(args);
6382 return r;
6384 EXPORT_SYMBOL(netdev_printk);
6386 #define define_netdev_printk_level(func, level) \
6387 int func(const struct net_device *dev, const char *fmt, ...) \
6389 int r; \
6390 struct va_format vaf; \
6391 va_list args; \
6393 va_start(args, fmt); \
6395 vaf.fmt = fmt; \
6396 vaf.va = &args; \
6398 r = __netdev_printk(level, dev, &vaf); \
6399 va_end(args); \
6401 return r; \
6403 EXPORT_SYMBOL(func);
6405 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6406 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6407 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6408 define_netdev_printk_level(netdev_err, KERN_ERR);
6409 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6410 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6411 define_netdev_printk_level(netdev_info, KERN_INFO);
6413 static void __net_exit netdev_exit(struct net *net)
6415 kfree(net->dev_name_head);
6416 kfree(net->dev_index_head);
6419 static struct pernet_operations __net_initdata netdev_net_ops = {
6420 .init = netdev_init,
6421 .exit = netdev_exit,
6424 static void __net_exit default_device_exit(struct net *net)
6426 struct net_device *dev, *aux;
6428 * Push all migratable network devices back to the
6429 * initial network namespace
6431 rtnl_lock();
6432 for_each_netdev_safe(net, dev, aux) {
6433 int err;
6434 char fb_name[IFNAMSIZ];
6436 /* Ignore unmoveable devices (i.e. loopback) */
6437 if (dev->features & NETIF_F_NETNS_LOCAL)
6438 continue;
6440 /* Leave virtual devices for the generic cleanup */
6441 if (dev->rtnl_link_ops)
6442 continue;
6444 /* Push remaining network devices to init_net */
6445 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6446 err = dev_change_net_namespace(dev, &init_net, fb_name);
6447 if (err) {
6448 pr_emerg("%s: failed to move %s to init_net: %d\n",
6449 __func__, dev->name, err);
6450 BUG();
6453 rtnl_unlock();
6456 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6458 /* At exit all network devices must be removed from a network
6459 * namespace. Do this in the reverse order of registration.
6460 * Do this across as many network namespaces as possible to
6461 * improve batching efficiency.
6463 struct net_device *dev;
6464 struct net *net;
6465 LIST_HEAD(dev_kill_list);
6467 rtnl_lock();
6468 list_for_each_entry(net, net_list, exit_list) {
6469 for_each_netdev_reverse(net, dev) {
6470 if (dev->rtnl_link_ops)
6471 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6472 else
6473 unregister_netdevice_queue(dev, &dev_kill_list);
6476 unregister_netdevice_many(&dev_kill_list);
6477 list_del(&dev_kill_list);
6478 rtnl_unlock();
6481 static struct pernet_operations __net_initdata default_device_ops = {
6482 .exit = default_device_exit,
6483 .exit_batch = default_device_exit_batch,
6487 * Initialize the DEV module. At boot time this walks the device list and
6488 * unhooks any devices that fail to initialise (normally hardware not
6489 * present) and leaves us with a valid list of present and active devices.
6494 * This is called single threaded during boot, so no need
6495 * to take the rtnl semaphore.
6497 static int __init net_dev_init(void)
6499 int i, rc = -ENOMEM;
6501 BUG_ON(!dev_boot_phase);
6503 if (dev_proc_init())
6504 goto out;
6506 if (netdev_kobject_init())
6507 goto out;
6509 INIT_LIST_HEAD(&ptype_all);
6510 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6511 INIT_LIST_HEAD(&ptype_base[i]);
6513 if (register_pernet_subsys(&netdev_net_ops))
6514 goto out;
6517 * Initialise the packet receive queues.
6520 for_each_possible_cpu(i) {
6521 struct softnet_data *sd = &per_cpu(softnet_data, i);
6523 memset(sd, 0, sizeof(*sd));
6524 skb_queue_head_init(&sd->input_pkt_queue);
6525 skb_queue_head_init(&sd->process_queue);
6526 sd->completion_queue = NULL;
6527 INIT_LIST_HEAD(&sd->poll_list);
6528 sd->output_queue = NULL;
6529 sd->output_queue_tailp = &sd->output_queue;
6530 #ifdef CONFIG_RPS
6531 sd->csd.func = rps_trigger_softirq;
6532 sd->csd.info = sd;
6533 sd->csd.flags = 0;
6534 sd->cpu = i;
6535 #endif
6537 sd->backlog.poll = process_backlog;
6538 sd->backlog.weight = weight_p;
6539 sd->backlog.gro_list = NULL;
6540 sd->backlog.gro_count = 0;
6543 dev_boot_phase = 0;
6545 /* The loopback device is special: if any other network device
6546 * is present in a network namespace, the loopback device must
6547 * be present too. Since we now dynamically allocate and free the
6548 * loopback device, ensure this invariant is maintained by
6549 * keeping the loopback device the first device on the
6550 * list of network devices, so that the loopback device
6551 * is the first device that appears and the last network device
6552 * that disappears.
6554 if (register_pernet_device(&loopback_net_ops))
6555 goto out;
6557 if (register_pernet_device(&default_device_ops))
6558 goto out;
6560 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6561 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6563 hotcpu_notifier(dev_cpu_callback, 0);
6564 dst_init();
6565 dev_mcast_init();
6566 rc = 0;
6567 out:
6568 return rc;
6571 subsys_initcall(net_dev_init);
6573 static int __init initialize_hashrnd(void)
6575 get_random_bytes(&hashrnd, sizeof(hashrnd));
6576 return 0;
6579 late_initcall_sync(initialize_hashrnd);