/* net/netfilter/nf_queue.c: netfilter packet queueing (linux-2.6.36, tomato.git) */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long-term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* return EBUSY when somebody else is registered, return EEXIST if the
 * same handler is registered, return 0 in case of success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
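
/*
 * Illustrative only: a minimal sketch of how a queue-handler module might
 * register itself, modelled on what nfnetlink_queue does.  The names
 * example_outfn and example_qh are hypothetical, not part of this file:
 *
 *	static int example_outfn(struct nf_queue_entry *entry,
 *				 unsigned int queuenum)
 *	{
 *		// stash entry, deliver to userspace, later nf_reinject() it
 *		return 0;
 *	}
 *
 *	static const struct nf_queue_handler example_qh = {
 *		.name	= "example",
 *		.outfn	= example_outfn,
 *	};
 *
 *	err = nf_register_queue_handler(NFPROTO_IPV4, &example_qh);
 */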

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] && queue_handler[pf] != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
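
/*
 * Drop @qh from every protocol family at once.  The synchronize_rcu() below
 * ensures no __nf_queue() caller can still be dereferencing the handler when
 * this returns, so the handler module can safely unload afterwards.
 */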
void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (queue_handler[pf] == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		status = -ECANCELED;
		goto err_unlock;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}
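
/*
 * GSO packets cannot be handed to the handler as one oversized skb: nf_queue()
 * segments them first, queues each segment individually via __nf_queue(), and
 * frees any remaining segments if queueing fails part-way through.
 */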
int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;
	int err;
	unsigned int queued;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR(segs))
		return -EINVAL;

	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = __nf_queue(segs, elem, pf, hook, indev,
					 outdev, okfn, queuenum);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	/* also free orig skb if only some segments were queued */
	if (unlikely(err && queued))
		err = 0;
	if (err == 0)
		kfree_skb(skb);
	return err;
}
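
/*
 * Entry point for the queue handler once a verdict has been reached: releases
 * the references taken in __nf_queue(), then accepts, drops, re-queues or
 * restarts hook traversal for the packet according to @verdict.
 */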
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = __nf_queue(skb, elem, entry->pf, entry->hook,
				 entry->indev, entry->outdev, entry->okfn,
				 verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
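
/*
 * Illustrative only: the matching half of the registration sketch above.  A
 * handler that stashed the entry in example_outfn() would hand the packet
 * back roughly like this once userspace answers (example_fetch_verdict is
 * hypothetical):
 *
 *	struct nf_queue_entry *entry = ...;	// saved by example_outfn()
 *	unsigned int verdict = example_fetch_verdict(entry);
 *
 *	nf_reinject(entry, verdict);		// e.g. NF_ACCEPT or NF_DROP
 */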

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
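
/*
 * Reading /proc/net/netfilter/nf_queue prints one line per protocol family:
 * the family number, then the registered handler's name or "NONE".  A
 * plausible excerpt (handler names depend on which modules are loaded):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	 3 NONE
 */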

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}