/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
enum netdev_tx {
	NETDEV_TX_OK = 0,	/* driver took care of packet */
	NETDEV_TX_BUSY,		/* driver tx path was busy */
	NETDEV_TX_LOCKED = -1,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
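
/*
 * Example (illustrative sketch only; the my_* driver names are
 * hypothetical and not part of this header): a transmit handler
 * returning the codes above.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (my_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;	// core will retry this skb
 *		}
 *		my_queue_for_dma(priv, skb);
 *		return NETDEV_TX_OK;		// skb is now owned by driver
 *	}
 */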
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /*  __KERNEL__  */
/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats {
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
/* Media selection options. */

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>
struct netif_rx_stats {
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list {
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_users;
	u8			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};
struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users		     */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 *  NOTE:  For VLANs, this will be the
					 *  encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * needs.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
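
/*
 * Example (sketch): a protocol allocating an skb with room for the
 * device's link-layer header; hard_header_len alone is not enough once
 * needed_headroom/needed_tailroom come into play (payload_len is a
 * placeholder for the caller's own length):
 *
 *	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 *	if (skb == NULL)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */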
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
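
/*
 * Example (sketch; the my_* functions are hypothetical): the canonical
 * NAPI pattern. The interrupt handler masks device interrupts and
 * schedules the poll routine; the poll routine consumes up to "budget"
 * packets and re-enables interrupts once the ring is drained.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = my_rx_clean(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */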
enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;

	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
	unsigned long		tx_bytes;
	unsigned long		tx_packets;
	unsigned long		tx_dropped;
} ____cacheline_aligned_in_smp;
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *	This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set the network device's bus interface parameters. This
 *	interface is retained for legacy reasons; new devices should use the
 *	bus interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. If not defined, the counters in dev->stats will
 *	be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If device supports VLAN receive acceleration
 *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *	when vlan groups for the device change. Note: grp is NULL
 *	if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
#define HAVE_SET_RX_MODE
	void			(*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
#define HAVE_TX_TIMEOUT
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
	void			(*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#endif
};
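
/*
 * Example (sketch; the my_* handlers are hypothetical): a driver
 * normally provides one static ops table and points dev->netdev_ops at
 * it before registration. The eth_* helpers shown are the stock
 * Ethernet implementations from <linux/etherdevice.h>:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_multicast_list	= my_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 */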
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain */
	struct hlist_node	name_hlist;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
				 NETIF_F_FRAGLIST)
	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;
	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct netdev_hw_addr_list	uc;	/* Secondary unicast
						   mac addresses */
	spinlock_t		addr_list_lock;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;
	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	struct net_bridge_port	*br_port;
	struct macvlan_port	*macvlan_port;
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define	NETDEV_ALIGN		32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return 0;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
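
/*
 * Example (sketch): the private area is allocated together with the
 * net_device by alloc_netdev(), so netdev_priv() is pure pointer
 * arithmetic; struct my_priv is a hypothetical driver structure:
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	priv = netdev_priv(dev);
 */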
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
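
/*
 * Example (sketch): walking every device in a namespace under
 * dev_base_lock:
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(&init_net, d)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */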
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);
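
/*
 * Example (sketch): a protocol registers a receive handler for an
 * ethertype with dev_add_pack(); 0x88b5 and my_rcv are placeholders:
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = __constant_htons(0x88b5),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);
 */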
extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		init_dummy_netdev(struct net_device *dev);
extern void		netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern int	       skb_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb);
extern void	       skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);
#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
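
/*
 * Example (sketch): typical flow control in a driver. The xmit path
 * stops the queue when the TX ring fills; the TX-completion handler
 * wakes it once descriptors have been reclaimed (my_* names are
 * hypothetical):
 *
 *	if (my_tx_ring_space(priv) < MAX_SKB_FRAGS + 1)
 *		netif_stop_queue(dev);
 *	...
 *	if (netif_queue_stopped(dev) && my_tx_ring_space(priv) > thresh)
 *		netif_wake_queue(dev);
 */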
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi);
extern int		dev_gro_receive(struct napi_struct *napi,
					struct sk_buff *skb);
extern int		napi_skb_finish(int ret, struct sk_buff *skb);
extern int		napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_reuse_skb(struct napi_struct *napi,
				       struct sk_buff *skb);
extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
extern int		napi_frags_finish(struct napi_struct *napi,
					  struct sk_buff *skb, int ret);
extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
extern int		napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern void		netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
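
/*
 * Example (sketch): lookup routines such as dev_get_by_index() return
 * the device with a reference already held, which the caller must
 * eventually drop with dev_put():
 *
 *	dev = dev_get_by_index(net, ifindex);
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */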
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is in the dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
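
/*
 * Example (sketch): a driver seeds priv->msg_enable from a module
 * parameter and then gates its logging on the per-class netif_msg_*()
 * tests above ("debug" and my_priv are hypothetical):
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link up\n", dev->name);
 */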
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
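
/*
 * Example (sketch): the usual lifecycle for a loadable Ethernet driver
 * (my_* names hypothetical, error handling abbreviated):
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	SET_NETDEV_DEV(dev, &pdev->dev);
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */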
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr);
extern int		dev_unicast_add(struct net_device *dev, void *addr);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev,
					      unsigned long event);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);

extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
					      struct net_device *master)
{
	if (skb->pkt_type == PACKET_HOST) {
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

		memcpy(dest, master->dev_addr, ETH_ALEN);
	}
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master) {
		if (master->priv_flags & IFF_MASTER_ARPMON)
			dev->last_rx = jiffies;

		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
			/* Do address unmangle. The local destination address
			 * will be always the one master has. Provides the right
			 * functionality in a bridge.
			 */
			skb_bond_set_mac_by_master(skb, master);
		}

		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
				return 0;

			if (master->priv_flags & IFF_MASTER_ALB) {
				if (skb->pkt_type != PACKET_BROADCAST &&
				    skb->pkt_type != PACKET_MULTICAST)
					return 0;
			}
			if (master->priv_flags & IFF_MASTER_8023AD &&
			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
				return 0;

			return 1;
		}
	}
	return 0;
}
extern struct pernet_operations __net_initdata loopback_net_ops;

static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}
#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */