/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000:   Added NF_REPEAT --RR.
 * 08-May-2003:   Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <net/sock.h>

#include "nf_internals.h"
/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks);

static DEFINE_SPINLOCK(nf_hook_lock);
int nf_register_hook(struct nf_hook_ops *reg)
{
	struct list_head *i;

	spin_lock_bh(&nf_hook_lock);
	list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
		if (reg->priority < ((struct nf_hook_ops *)i)->priority)
			break;
	}
	list_add_rcu(&reg->list, i->prev);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
	return 0;
}
EXPORT_SYMBOL(nf_register_hook);
void nf_unregister_hook(struct nf_hook_ops *reg)
{
	spin_lock_bh(&nf_hook_lock);
	list_del_rcu(&reg->list);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
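
/* Editor's sketch (not in the original file): how a module might use the
 * registration API above.  The names example_hook and example_ops are
 * hypothetical; the prototype and nf_hook_ops fields match what
 * nf_iterate() below relies on (pf/hooknum indexing, priority ordering).
 *
 *	static unsigned int example_hook(unsigned int hooknum,
 *					 struct sk_buff **pskb,
 *					 const struct net_device *in,
 *					 const struct net_device *out,
 *					 int (*okfn)(struct sk_buff *))
 *	{
 *		return NF_ACCEPT;	-- pass every packet through
 *	}
 *
 *	static struct nf_hook_ops example_ops = {
 *		.hook		= example_hook,
 *		.owner		= THIS_MODULE,
 *		.pf		= PF_INET,
 *		.hooknum	= NF_IP_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FIRST,
 *	};
 *
 * The module calls nf_register_hook(&example_ops) from its init routine
 * and nf_unregister_hook(&example_ops) on exit.  Hooks sharing a
 * (pf, hooknum) list are kept sorted by ascending priority, which is why
 * nf_register_hook() walks the list before linking the new entry in.
 */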
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff **skb,
			int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct list_head **i,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}
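
/* Editor's note (not in the original file): verdicts are not plain enums
 * here.  The low NF_VERDICT_BITS hold the verdict proper (NF_ACCEPT,
 * NF_DROP, NF_QUEUE, ...) while the high bits carry auxiliary data such
 * as the target queue number, e.g. (assuming the NF_QUEUE_NR() helper
 * from <linux/netfilter.h> of this era):
 *
 *	verdict = NF_QUEUE_NR(5);		 -- send packet to queue 5
 *	(verdict & NF_VERDICT_MASK) == NF_QUEUE	 -- what nf_hook_slow() tests
 *	verdict >> NF_VERDICT_BITS == 5		 -- what nf_queue() receives
 *
 * NF_REPEAT is special-cased above: stepping *i back one element makes
 * the list_for_each_continue_rcu() loop invoke the same hook again.
 */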
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct list_head *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

	elem = &nf_hooks[pf][hook];
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
		goto unlock;
	} else if (verdict == NF_DROP) {
		kfree_skb(*pskb);
		ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
		if (!nf_queue(pskb, elem, pf, hook, indev, outdev, okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
	}
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
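
/* Editor's sketch (not in the original file): callers normally reach
 * nf_hook_slow() through the NF_HOOK() macro in <linux/netfilter.h>,
 * roughly
 *
 *	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, dev, dst_output);
 *
 * which (modulo an empty-list fast path) boils down to
 *
 *	ret = nf_hook_slow(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL, dev,
 *			   dst_output, INT_MIN);
 *	if (ret == 1)
 *		ret = dst_output(skb);
 *
 * matching the contract documented above: 1 means the caller runs okfn()
 * itself, -EPERM means the packet was dropped, and 0 means someone else
 * (e.g. the queue handler) now owns the skb.
 */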
int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
	struct sk_buff *nskb;

	if (writable_len > (*pskb)->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (skb_shared(*pskb) || skb_cloned(*pskb))
		goto copy_skb;

	return pskb_may_pull(*pskb, writable_len);

copy_skb:
	nskb = skb_copy(*pskb, GFP_ATOMIC);
	if (!nskb)
		return 0;
	BUG_ON(skb_is_nonlinear(nskb));

	/* Rest of kernel will get very unhappy if we pass it a
	   suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}
EXPORT_SYMBOL(skb_make_writable);
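
/* Editor's sketch (not in the original file): a hypothetical caller that
 * wants to rewrite the IP header, in the style of 2.6-era targets such
 * as ipt_TOS:
 *
 *	if (!skb_make_writable(pskb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *	iph = (*pskb)->nh.iph;
 *	iph->tos = new_tos;
 *	(then recompute the IP checksum)
 *
 * Because the skb may be replaced by a private copy on success, callers
 * must reload any header pointers they cached before the call.
 */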
/* This does not belong here, but locally generated errors need it if connection
   tracking in use: without this, connection may not be in hash table, and hence
   manufactured ICMP or RST packets will not be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, struct sk_buff *);

	if (skb->nfct && (attach = ip_ct_attach) != NULL) {
		mb(); /* Just to be sure: must be read before executing this */
		attach(new, skb);
	}
}
EXPORT_SYMBOL(nf_ct_attach);
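
/* Editor's note (not in the original file): ip_ct_attach is a function
 * pointer rather than a direct call so this core file does not depend on
 * the connection-tracking module; in this era the ip_conntrack module
 * points it at its own attach routine at load time.  nf_ct_attach()
 * snapshots the pointer into a local first, so the NULL test and the
 * call cannot see two different values if the module is unloading.
 */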
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_net_netfilter;
EXPORT_SYMBOL(proc_net_netfilter);
#endif
void __init netfilter_init(void)
{
	int i, h;

	for (i = 0; i < NPROTO; i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&nf_hooks[i][h]);
	}

#ifdef CONFIG_PROC_FS
	proc_net_netfilter = proc_mkdir("netfilter", proc_net);
	if (!proc_net_netfilter)
		panic("cannot create netfilter proc entry");
#endif

	if (netfilter_queue_init() < 0)
		panic("cannot initialize nf_queue");
	if (netfilter_log_init() < 0)
		panic("cannot initialize nf_log");
}