net/netfilter/nf_queue.c
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"
/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
/* Only one queueing backend may be registered at a time. */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(queue_handler));
        rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
        RCU_INIT_POINTER(queue_handler, NULL);
        synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
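/*
 * Illustrative sketch (not part of the original file): roughly how a
 * queueing backend such as nfnetlink_queue plugs into the handler slot
 * above. The names example_outfn, example_qh and example_init are
 * hypothetical; the real contract is only struct nf_queue_handler.outfn
 * plus the rule that every entry handed out must eventually come back
 * through nf_reinject().
 */
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /*
         * A real backend would stash the entry and hand the packet to
         * userspace here, reinjecting it later once a verdict arrives.
         * Returning a negative value instead makes nf_queue() drop the
         * references and free the entry.
         */
        return 0;
}

static const struct nf_queue_handler example_qh = {
        .outfn = example_outfn,
};

static int __init example_init(void)
{
        nf_register_queue_handler(&example_qh);
        return 0;
}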
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
        /* Drop reference to owner of hook which queued us. */
        module_put(entry->elem->owner);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (!try_module_get(entry->elem->owner))
                return false;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif

        return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
             struct nf_hook_ops *elem,
             struct nf_hook_state *state,
             unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        rcu_read_lock();

        qh = rcu_dereference(queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err_unlock;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err_unlock;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err_unlock;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = elem,
                .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };

        if (!nf_queue_entry_get_refs(entry)) {
                status = -ECANCELED;
                goto err_unlock;
        }
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        rcu_read_unlock();

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err_unlock:
        rcu_read_unlock();
err:
        kfree(entry);
        return status;
}
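/*
 * Illustrative sketch (not part of the original file): a netfilter hook asks
 * for queueing by returning NF_QUEUE with the target queue number packed into
 * the upper verdict bits; NF_QUEUE_NR() does that packing. OR-ing in
 * NF_VERDICT_FLAG_QUEUE_BYPASS tells the caller (see the NF_QUEUE case in
 * nf_reinject() below) to continue hook traversal instead of dropping the
 * packet when nf_queue() returns -ESRCH because no handler is registered.
 * example_hook is hypothetical; the prototype matches the nf_hook_state
 * based hooks used by this version of the code.
 */
static unsigned int example_hook(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct nf_hook_state *state)
{
        /* Send packets to userspace queue 3; pass them through if nothing
         * is listening rather than dropping them.
         */
        return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}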
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct sk_buff *skb = entry->skb;
        struct nf_hook_ops *elem = entry->elem;
        const struct nf_afinfo *afinfo;
        int err;

        rcu_read_lock();

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
                elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        entry->state.thresh = INT_MIN;

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
                                     skb, &entry->state, &elem);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, elem, &entry->state,
                               verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
                        if (err == -ESRCH &&
                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }
        rcu_read_unlock();
        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
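/*
 * Illustrative sketch (not part of the original file): the completion side
 * of a backend. Once a verdict is known for an entry stashed by outfn
 * (typically after a userspace reply), it is handed back to nf_reinject()
 * above: NF_ACCEPT and NF_STOP resume delivery via okfn, NF_DROP frees the
 * skb, NF_REPEAT re-runs the hook that queued the packet, and NF_STOLEN
 * assumes the skb has been taken over. The function name and the "accept"
 * flag are hypothetical.
 */
static void example_verdict(struct nf_queue_entry *entry, bool accept)
{
        nf_reinject(entry, accept ? NF_ACCEPT : NF_DROP);
}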