[PARISC] irq_affinity[] only available for SMP builds
[linux-2.6.22.y-op.git] / net / netfilter / nf_queue.c
blobd3a4f30a7f2247f0bac16cdc9466c31b12409e39
1 #include <linux/config.h>
2 #include <linux/kernel.h>
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/proc_fs.h>
6 #include <linux/skbuff.h>
7 #include <linux/netfilter.h>
8 #include <linux/seq_file.h>
9 #include <net/protocol.h>
11 #include "nf_internals.h"
13 /*
14 * A queue handler may be registered for each protocol. Each is protected by
15 * long term mutex. The handler must provide an an outfn() to accept packets
16 * for queueing and must reinject all packets it receives, no matter what.
18 static struct nf_queue_handler *queue_handler[NPROTO];
19 static struct nf_queue_rerouter *queue_rerouter;
21 static DEFINE_RWLOCK(queue_handler_lock);
23 /* return EBUSY when somebody else is registered, return EEXIST if the
24 * same handler is registered, return 0 in case of success. */
25 int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
27 int ret;
29 if (pf >= NPROTO)
30 return -EINVAL;
32 write_lock_bh(&queue_handler_lock);
33 if (queue_handler[pf] == qh)
34 ret = -EEXIST;
35 else if (queue_handler[pf])
36 ret = -EBUSY;
37 else {
38 queue_handler[pf] = qh;
39 ret = 0;
41 write_unlock_bh(&queue_handler_lock);
43 return ret;
45 EXPORT_SYMBOL(nf_register_queue_handler);
47 /* The caller must flush their queue before this */
48 int nf_unregister_queue_handler(int pf)
50 if (pf >= NPROTO)
51 return -EINVAL;
53 write_lock_bh(&queue_handler_lock);
54 queue_handler[pf] = NULL;
55 write_unlock_bh(&queue_handler_lock);
57 return 0;
59 EXPORT_SYMBOL(nf_unregister_queue_handler);
61 int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
63 if (pf >= NPROTO)
64 return -EINVAL;
66 write_lock_bh(&queue_handler_lock);
67 memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
68 write_unlock_bh(&queue_handler_lock);
70 return 0;
72 EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
74 int nf_unregister_queue_rerouter(int pf)
76 if (pf >= NPROTO)
77 return -EINVAL;
79 write_lock_bh(&queue_handler_lock);
80 memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
81 write_unlock_bh(&queue_handler_lock);
82 return 0;
84 EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
86 void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
88 int pf;
90 write_lock_bh(&queue_handler_lock);
91 for (pf = 0; pf < NPROTO; pf++) {
92 if (queue_handler[pf] == qh)
93 queue_handler[pf] = NULL;
95 write_unlock_bh(&queue_handler_lock);
97 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
99 /*
100 * Any packet that leaves via this function must come back
101 * through nf_reinject().
103 int nf_queue(struct sk_buff **skb,
104 struct list_head *elem,
105 int pf, unsigned int hook,
106 struct net_device *indev,
107 struct net_device *outdev,
108 int (*okfn)(struct sk_buff *),
109 unsigned int queuenum)
111 int status;
112 struct nf_info *info;
113 #ifdef CONFIG_BRIDGE_NETFILTER
114 struct net_device *physindev = NULL;
115 struct net_device *physoutdev = NULL;
116 #endif
118 /* QUEUE == DROP if noone is waiting, to be safe. */
119 read_lock(&queue_handler_lock);
120 if (!queue_handler[pf] || !queue_handler[pf]->outfn) {
121 read_unlock(&queue_handler_lock);
122 kfree_skb(*skb);
123 return 1;
126 info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
127 if (!info) {
128 if (net_ratelimit())
129 printk(KERN_ERR "OOM queueing packet %p\n",
130 *skb);
131 read_unlock(&queue_handler_lock);
132 kfree_skb(*skb);
133 return 1;
136 *info = (struct nf_info) {
137 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
139 /* If it's going away, ignore hook. */
140 if (!try_module_get(info->elem->owner)) {
141 read_unlock(&queue_handler_lock);
142 kfree(info);
143 return 0;
146 /* Bump dev refs so they don't vanish while packet is out */
147 if (indev) dev_hold(indev);
148 if (outdev) dev_hold(outdev);
150 #ifdef CONFIG_BRIDGE_NETFILTER
151 if ((*skb)->nf_bridge) {
152 physindev = (*skb)->nf_bridge->physindev;
153 if (physindev) dev_hold(physindev);
154 physoutdev = (*skb)->nf_bridge->physoutdev;
155 if (physoutdev) dev_hold(physoutdev);
157 #endif
158 if (queue_rerouter[pf].save)
159 queue_rerouter[pf].save(*skb, info);
161 status = queue_handler[pf]->outfn(*skb, info, queuenum,
162 queue_handler[pf]->data);
164 if (status >= 0 && queue_rerouter[pf].reroute)
165 status = queue_rerouter[pf].reroute(skb, info);
167 read_unlock(&queue_handler_lock);
169 if (status < 0) {
170 /* James M doesn't say fuck enough. */
171 if (indev) dev_put(indev);
172 if (outdev) dev_put(outdev);
173 #ifdef CONFIG_BRIDGE_NETFILTER
174 if (physindev) dev_put(physindev);
175 if (physoutdev) dev_put(physoutdev);
176 #endif
177 module_put(info->elem->owner);
178 kfree(info);
179 kfree_skb(*skb);
181 return 1;
184 return 1;
187 void nf_reinject(struct sk_buff *skb, struct nf_info *info,
188 unsigned int verdict)
190 struct list_head *elem = &info->elem->list;
191 struct list_head *i;
193 rcu_read_lock();
195 /* Release those devices we held, or Alexey will kill me. */
196 if (info->indev) dev_put(info->indev);
197 if (info->outdev) dev_put(info->outdev);
198 #ifdef CONFIG_BRIDGE_NETFILTER
199 if (skb->nf_bridge) {
200 if (skb->nf_bridge->physindev)
201 dev_put(skb->nf_bridge->physindev);
202 if (skb->nf_bridge->physoutdev)
203 dev_put(skb->nf_bridge->physoutdev);
205 #endif
207 /* Drop reference to owner of hook which queued us. */
208 module_put(info->elem->owner);
210 list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
211 if (i == elem)
212 break;
215 if (elem == &nf_hooks[info->pf][info->hook]) {
216 /* The module which sent it to userspace is gone. */
217 NFDEBUG("%s: module disappeared, dropping packet.\n",
218 __FUNCTION__);
219 verdict = NF_DROP;
222 /* Continue traversal iff userspace said ok... */
223 if (verdict == NF_REPEAT) {
224 elem = elem->prev;
225 verdict = NF_ACCEPT;
228 if (verdict == NF_ACCEPT) {
229 next_hook:
230 verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
231 &skb, info->hook,
232 info->indev, info->outdev, &elem,
233 info->okfn, INT_MIN);
236 switch (verdict & NF_VERDICT_MASK) {
237 case NF_ACCEPT:
238 info->okfn(skb);
239 break;
241 case NF_QUEUE:
242 if (!nf_queue(&skb, elem, info->pf, info->hook,
243 info->indev, info->outdev, info->okfn,
244 verdict >> NF_VERDICT_BITS))
245 goto next_hook;
246 break;
248 rcu_read_unlock();
250 if (verdict == NF_DROP)
251 kfree_skb(skb);
253 kfree(info);
254 return;
256 EXPORT_SYMBOL(nf_reinject);
#ifdef CONFIG_PROC_FS
/* /proc/net/netfilter/nf_queue: iterate protocol families 0..NPROTO-1,
 * using the loff_t position itself as the iterator token. */
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

/* One line per family: "<pf> <handler name>" or "<pf> NONE". */
static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	struct nf_queue_handler *qh;

	read_lock_bh(&queue_handler_lock);
	qh = queue_handler[*pos];
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	read_unlock_bh(&queue_handler_lock);

	return ret;
}

static struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
321 int __init netfilter_queue_init(void)
323 #ifdef CONFIG_PROC_FS
324 struct proc_dir_entry *pde;
325 #endif
326 queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
327 GFP_KERNEL);
328 if (!queue_rerouter)
329 return -ENOMEM;
331 #ifdef CONFIG_PROC_FS
332 pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
333 if (!pde) {
334 kfree(queue_rerouter);
335 return -1;
337 pde->proc_fops = &nfqueue_file_ops;
338 #endif
339 memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
341 return 0;