/*
 * Source: include/linux/netpoll.h from the linux-2.6 tree
 * (gitweb snapshot at commit "TOMOYO: Use callback for updating entries.",
 *  blob e9e231215865bd6218abd37c320784ab3976235e)
 */
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
15 struct netpoll {
16 struct net_device *dev;
17 struct net_device *real_dev;
18 char dev_name[IFNAMSIZ];
19 const char *name;
20 void (*rx_hook)(struct netpoll *, int, char *, int);
22 __be32 local_ip, remote_ip;
23 u16 local_port, remote_port;
24 u8 remote_mac[ETH_ALEN];
26 struct list_head rx; /* rx_np list element */
29 struct netpoll_info {
30 atomic_t refcnt;
32 int rx_flags;
33 spinlock_t rx_lock;
34 struct list_head rx_np; /* netpolls that registered an rx_hook */
36 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
37 struct sk_buff_head txq;
39 struct delayed_work tx_work;
41 struct netpoll *netpoll;
/* Poll a device's NAPI contexts / controller to push and pull packets. */
void netpoll_poll_dev(struct net_device *dev);
void netpoll_poll(struct netpoll *np);

/* Transmit helpers: emit a UDP message or a preformed skb via netpoll. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);

/* Configuration: parse/print the "port@addr/dev,..." option string. */
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);

/* Setup / teardown of a netpoll instance on its device. */
int netpoll_setup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);

/* Packet trapping control (e.g. for kgdb-over-ethernet). */
int netpoll_trap(void);
void netpoll_set_trap(int trap);

/* Core rx entry, called with rx_lock held; see netpoll_rx() below. */
int __netpoll_rx(struct sk_buff *skb);
57 #ifdef CONFIG_NETPOLL
58 static inline bool netpoll_rx(struct sk_buff *skb)
60 struct netpoll_info *npinfo = skb->dev->npinfo;
61 unsigned long flags;
62 bool ret = false;
64 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
65 return false;
67 spin_lock_irqsave(&npinfo->rx_lock, flags);
68 /* check rx_flags again with the lock held */
69 if (npinfo->rx_flags && __netpoll_rx(skb))
70 ret = true;
71 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
73 return ret;
76 static inline int netpoll_rx_on(struct sk_buff *skb)
78 struct netpoll_info *npinfo = skb->dev->npinfo;
80 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
83 static inline int netpoll_receive_skb(struct sk_buff *skb)
85 if (!list_empty(&skb->dev->napi_list))
86 return netpoll_rx(skb);
87 return 0;
90 static inline void *netpoll_poll_lock(struct napi_struct *napi)
92 struct net_device *dev = napi->dev;
94 rcu_read_lock(); /* deal with race on ->npinfo */
95 if (dev && dev->npinfo) {
96 spin_lock(&napi->poll_lock);
97 napi->poll_owner = smp_processor_id();
98 return napi;
100 return NULL;
103 static inline void netpoll_poll_unlock(void *have)
105 struct napi_struct *napi = have;
107 if (napi) {
108 napi->poll_owner = -1;
109 spin_unlock(&napi->poll_lock);
111 rcu_read_unlock();
114 #else
115 static inline int netpoll_rx(struct sk_buff *skb)
117 return 0;
119 static inline int netpoll_rx_on(struct sk_buff *skb)
121 return 0;
123 static inline int netpoll_receive_skb(struct sk_buff *skb)
125 return 0;
127 static inline void *netpoll_poll_lock(struct napi_struct *napi)
129 return NULL;
131 static inline void netpoll_poll_unlock(void *have)
134 static inline void netpoll_netdev_init(struct net_device *dev)
137 #endif
139 #endif