/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];

	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];

	struct list_head rx; /* rx_np list element */
};
struct netpoll_info {
	atomic_t refcnt;

	int rx_flags;
	spinlock_t rx_lock;
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;

	struct delayed_work tx_work;

	struct netpoll *netpoll;
};
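
/*
 * Relationship between the two structures: struct netpoll is the per-client
 * handle (e.g. one per netconsole target), while struct netpoll_info hangs
 * off the bound device (dev->npinfo) and is shared by every netpoll attached
 * to that device; rx_np chains the netpolls that registered an rx_hook.
 */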
void netpoll_poll_dev(struct net_device *dev);
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
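
/*
 * Typical client usage, sketched.  The configuration string illustrates the
 * "local_port@local_ip/dev,remote_port@remote_ip/remote_mac" style handled
 * by netpoll_parse_options(); all values here are only examples:
 *
 *	static struct netpoll np;
 *
 *	// e.g. opt = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"
 *	if (netpoll_parse_options(&np, opt))
 *		goto fail;
 *	if (netpoll_setup(&np))
 *		goto fail;
 *	netpoll_send_udp(&np, msg, msg_len);
 *	...
 *	netpoll_cleanup(&np);
 */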
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netpoll_send_skb_on_dev(np, skb, np->dev);
}
#ifdef CONFIG_NETPOLL
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
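
/*
 * netpoll_rx() is invoked from the receive path; a true return means netpoll
 * consumed the packet (for a registered rx_hook) and the caller should not
 * feed it to the regular protocol stack.
 */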
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
}
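
/*
 * The NAPI softirq loop brackets each ->poll() call with this pair, roughly:
 *
 *	void *have = netpoll_poll_lock(napi);
 *	work = napi->poll(napi, budget);
 *	netpoll_poll_unlock(have);
 *
 * which keeps netpoll's direct device polling from running a driver's
 * ->poll() concurrently with the normal NAPI path.
 */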
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
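
/*
 * Heuristic: netpoll transmits with local interrupts disabled, so a driver's
 * transmit path can use netpoll_tx_running() to tell whether it was entered
 * from netpoll rather than from the normal stack.
 */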
#else
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
#endif

#endif