net/netfilter/core.c
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000:   Added NF_REPEAT --RR.
 * 08-May-2003:   Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <net/sock.h>

#include "nf_internals.h"
static DEFINE_SPINLOCK(afinfo_lock);

/* Per-address-family netfilter info; writers serialize on afinfo_lock,
 * readers look it up via nf_afinfo[] under RCU. */
struct nf_afinfo *nf_afinfo[NPROTO];
EXPORT_SYMBOL(nf_afinfo);

int nf_register_afinfo(struct nf_afinfo *afinfo)
{
        spin_lock(&afinfo_lock);
        rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
        spin_unlock(&afinfo_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(struct nf_afinfo *afinfo)
{
        spin_lock(&afinfo_lock);
        rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
        spin_unlock(&afinfo_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks);
static DEFINE_SPINLOCK(nf_hook_lock);
/* Hooks are kept in ascending priority order; writers take nf_hook_lock,
 * readers walk the list under RCU, hence the synchronize_net(). */
int nf_register_hook(struct nf_hook_ops *reg)
{
        struct list_head *i;

        spin_lock_bh(&nf_hook_lock);
        list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
                if (reg->priority < ((struct nf_hook_ops *)i)->priority)
                        break;
        }
        list_add_rcu(&reg->list, i->prev);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
        return 0;
}
EXPORT_SYMBOL(nf_register_hook);
void nf_unregister_hook(struct nf_hook_ops *reg)
{
        spin_lock_bh(&nf_hook_lock);
        list_del_rcu(&reg->list);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
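
/*
 * Illustrative sketch (not part of the original file): a module would
 * typically fill in an nf_hook_ops and register it at init time.  The
 * hook prototype matches the elem->hook() call in nf_iterate() below;
 * the names my_hook/my_ops are hypothetical, and the IPv4 constants are
 * assumed to come from <linux/netfilter_ipv4.h>:
 *
 *      static unsigned int my_hook(unsigned int hooknum,
 *                                  struct sk_buff **pskb,
 *                                  const struct net_device *in,
 *                                  const struct net_device *out,
 *                                  int (*okfn)(struct sk_buff *))
 *      {
 *              return NF_ACCEPT;
 *      }
 *
 *      static struct nf_hook_ops my_ops = {
 *              .hook     = my_hook,
 *              .owner    = THIS_MODULE,
 *              .pf       = PF_INET,
 *              .hooknum  = NF_IP_PRE_ROUTING,
 *              .priority = NF_IP_PRI_FIRST,
 *      };
 *
 * The module calls nf_register_hook(&my_ops) from its init function and
 * nf_unregister_hook(&my_ops) from its exit function.
 */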
/* Register a batch of hooks; on failure, unwind the ones already
 * registered and return the error. */
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_hook(&reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_hooks(reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_hooks);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                nf_unregister_hook(&reg[i]);
}
EXPORT_SYMBOL(nf_unregister_hooks);
unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff **skb,
                        int hook,
                        const struct net_device *indev,
                        const struct net_device *outdev,
                        struct list_head **i,
                        int (*okfn)(struct sk_buff *),
                        int hook_thresh)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_continue_rcu(*i, head) {
                struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

                if (hook_thresh > elem->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
                verdict = elem->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        elem->hook, hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        *i = (*i)->prev;
                }
        }
        return NF_ACCEPT;
}
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
                 struct net_device *indev,
                 struct net_device *outdev,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
{
        struct list_head *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

        elem = &nf_hooks[pf][hook];
next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
                goto unlock;
        } else if (verdict == NF_DROP) {
                kfree_skb(*pskb);
                ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                NFDEBUG("nf_hook: Verdict = QUEUE.\n");
                if (!nf_queue(pskb, elem, pf, hook, indev, outdev, okfn,
                              verdict >> NF_VERDICT_BITS))
                        goto next_hook;
        }
unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
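
/*
 * Illustrative sketch (not part of the original file): protocol code does
 * not call nf_hook_slow() directly but goes through the NF_HOOK() wrapper
 * in <linux/netfilter.h>.  The exact macro text is an assumption here, but
 * its effect amounts to roughly:
 *
 *      ret = nf_hook_slow(pf, hook, &skb, indev, outdev, okfn, INT_MIN);
 *      if (ret == 1)
 *              ret = okfn(skb);
 *
 * so a return of 1 means "run the continuation okfn()", -EPERM means the
 * packet was dropped, and 0 means it was queued or stolen by a hook.
 */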
/* Make the first writable_len bytes of *pskb safe to modify, copying the
 * skb if it is shared or cloned; *pskb may be replaced on return.
 * Returns 1 on success, 0 on failure. */
int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
        struct sk_buff *nskb;

        if (writable_len > (*pskb)->len)
                return 0;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, writable_len);

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}
EXPORT_SYMBOL(skb_make_writable);
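
/*
 * Illustrative usage sketch (not part of the original file): code about to
 * modify packet data, e.g. an iptables target rewriting the IP header,
 * would make that region writable first.  The surrounding context is
 * hypothetical:
 *
 *      if (!skb_make_writable(pskb, sizeof(struct iphdr)))
 *              return NF_DROP;
 *      iph = (*pskb)->nh.iph;
 *      ... modify the header, then recompute the checksum ...
 *
 * Note that *pskb may point at a private copy after the call.
 */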
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be
   in the hash table, and hence manufactured ICMP or RST packets will not
   be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, struct sk_buff *);

        if (skb->nfct && (attach = ip_ct_attach) != NULL) {
                mb(); /* Just to be sure: must be read before executing this */
                attach(new, skb);
        }
}
EXPORT_SYMBOL(nf_ct_attach);
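
/*
 * Illustrative usage sketch (not part of the original file): code that
 * manufactures a reply from an existing packet, such as the ICMP error or
 * TCP RST senders, is expected to call
 *
 *      nf_ct_attach(nskb, oldskb);
 *
 * after building nskb, so that connection tracking (which fills in the
 * ip_ct_attach pointer when loaded) can associate the new packet with
 * oldskb's connection.  This call is a no-op when conntrack is absent.
 */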
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_net_netfilter;
EXPORT_SYMBOL(proc_net_netfilter);
#endif
void __init netfilter_init(void)
{
        int i, h;

        for (i = 0; i < NPROTO; i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&nf_hooks[i][h]);
        }

#ifdef CONFIG_PROC_FS
        proc_net_netfilter = proc_mkdir("netfilter", proc_net);
        if (!proc_net_netfilter)
                panic("cannot create netfilter proc entry");
#endif

        if (netfilter_queue_init() < 0)
                panic("cannot initialize nf_queue");
        if (netfilter_log_init() < 0)
                panic("cannot initialize nf_log");
}