drm/nouveau: ratelimit IRQ messages
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / include / linux / netpoll.h
blob79358bb712c6905e474a55b09b1edae58c3a5c43
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
15 struct netpoll {
16 struct net_device *dev;
17 char dev_name[IFNAMSIZ];
18 const char *name;
19 void (*rx_hook)(struct netpoll *, int, char *, int);
21 __be32 local_ip, remote_ip;
22 u16 local_port, remote_port;
23 u8 remote_mac[ETH_ALEN];
25 struct list_head rx; /* rx_np list element */
28 struct netpoll_info {
29 atomic_t refcnt;
31 int rx_flags;
32 spinlock_t rx_lock;
33 struct list_head rx_np; /* netpolls that registered an rx_hook */
35 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
36 struct sk_buff_head txq;
38 struct delayed_work tx_work;
40 struct netpoll *netpoll;
43 void netpoll_poll_dev(struct net_device *dev);
44 void netpoll_poll(struct netpoll *np);
45 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
46 void netpoll_print_options(struct netpoll *np);
47 int netpoll_parse_options(struct netpoll *np, char *opt);
48 int __netpoll_setup(struct netpoll *np);
49 int netpoll_setup(struct netpoll *np);
50 int netpoll_trap(void);
51 void netpoll_set_trap(int trap);
52 void __netpoll_cleanup(struct netpoll *np);
53 void netpoll_cleanup(struct netpoll *np);
54 int __netpoll_rx(struct sk_buff *skb);
55 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
56 struct net_device *dev);
57 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
59 netpoll_send_skb_on_dev(np, skb, np->dev);
#ifdef CONFIG_NETPOLL
65 static inline bool netpoll_rx(struct sk_buff *skb)
67 struct netpoll_info *npinfo;
68 unsigned long flags;
69 bool ret = false;
71 local_irq_save(flags);
72 npinfo = rcu_dereference_bh(skb->dev->npinfo);
74 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
75 goto out;
77 spin_lock(&npinfo->rx_lock);
78 /* check rx_flags again with the lock held */
79 if (npinfo->rx_flags && __netpoll_rx(skb))
80 ret = true;
81 spin_unlock(&npinfo->rx_lock);
83 out:
84 local_irq_restore(flags);
85 return ret;
88 static inline int netpoll_rx_on(struct sk_buff *skb)
90 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
92 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
95 static inline int netpoll_receive_skb(struct sk_buff *skb)
97 if (!list_empty(&skb->dev->napi_list))
98 return netpoll_rx(skb);
99 return 0;
102 static inline void *netpoll_poll_lock(struct napi_struct *napi)
104 struct net_device *dev = napi->dev;
106 if (dev && dev->npinfo) {
107 spin_lock(&napi->poll_lock);
108 napi->poll_owner = smp_processor_id();
109 return napi;
111 return NULL;
114 static inline void netpoll_poll_unlock(void *have)
116 struct napi_struct *napi = have;
118 if (napi) {
119 napi->poll_owner = -1;
120 spin_unlock(&napi->poll_lock);
124 static inline int netpoll_tx_running(struct net_device *dev)
126 return irqs_disabled();
#else
130 static inline bool netpoll_rx(struct sk_buff *skb)
132 return 0;
134 static inline int netpoll_rx_on(struct sk_buff *skb)
136 return 0;
138 static inline int netpoll_receive_skb(struct sk_buff *skb)
140 return 0;
142 static inline void *netpoll_poll_lock(struct napi_struct *napi)
144 return NULL;
146 static inline void netpoll_poll_unlock(void *have)
149 static inline void netpoll_netdev_init(struct net_device *dev)
152 static inline int netpoll_tx_running(struct net_device *dev)
154 return 0;
#endif

#endif /* _LINUX_NETPOLL_H */