#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"
17 * Hook for nfnetlink_queue to register its queue handler.
18 * We do this so that most of the NFQUEUE code can be modular.
20 * Once the queue is registered it must reinject all packets it
21 * receives, no matter what.
23 static const struct nf_queue_handler __rcu
*queue_handler __read_mostly
;
25 /* return EBUSY when somebody else is registered, return EEXIST if the
26 * same handler is registered, return 0 in case of success. */
27 void nf_register_queue_handler(const struct nf_queue_handler
*qh
)
29 /* should never happen, we only have one queueing backend in kernel */
30 WARN_ON(rcu_access_pointer(queue_handler
));
31 rcu_assign_pointer(queue_handler
, qh
);
33 EXPORT_SYMBOL(nf_register_queue_handler
);
35 /* The caller must flush their queue before this */
36 void nf_unregister_queue_handler(void)
38 RCU_INIT_POINTER(queue_handler
, NULL
);
41 EXPORT_SYMBOL(nf_unregister_queue_handler
);
43 static void nf_queue_entry_release_refs(struct nf_queue_entry
*entry
)
45 /* Release those devices we held, or Alexey will kill me. */
47 dev_put(entry
->indev
);
49 dev_put(entry
->outdev
);
50 #ifdef CONFIG_BRIDGE_NETFILTER
51 if (entry
->skb
->nf_bridge
) {
52 struct nf_bridge_info
*nf_bridge
= entry
->skb
->nf_bridge
;
54 if (nf_bridge
->physindev
)
55 dev_put(nf_bridge
->physindev
);
56 if (nf_bridge
->physoutdev
)
57 dev_put(nf_bridge
->physoutdev
);
60 /* Drop reference to owner of hook which queued us. */
61 module_put(entry
->elem
->owner
);
65 * Any packet that leaves via this function must come back
66 * through nf_reinject().
68 static int __nf_queue(struct sk_buff
*skb
,
69 struct nf_hook_ops
*elem
,
70 u_int8_t pf
, unsigned int hook
,
71 struct net_device
*indev
,
72 struct net_device
*outdev
,
73 int (*okfn
)(struct sk_buff
*),
74 unsigned int queuenum
)
77 struct nf_queue_entry
*entry
= NULL
;
78 #ifdef CONFIG_BRIDGE_NETFILTER
79 struct net_device
*physindev
;
80 struct net_device
*physoutdev
;
82 const struct nf_afinfo
*afinfo
;
83 const struct nf_queue_handler
*qh
;
85 /* QUEUE == DROP if no one is waiting, to be safe. */
88 qh
= rcu_dereference(queue_handler
);
94 afinfo
= nf_get_afinfo(pf
);
98 entry
= kmalloc(sizeof(*entry
) + afinfo
->route_key_size
, GFP_ATOMIC
);
104 *entry
= (struct nf_queue_entry
) {
114 /* If it's going away, ignore hook. */
115 if (!try_module_get(entry
->elem
->owner
)) {
119 /* Bump dev refs so they don't vanish while packet is out */
124 #ifdef CONFIG_BRIDGE_NETFILTER
125 if (skb
->nf_bridge
) {
126 physindev
= skb
->nf_bridge
->physindev
;
129 physoutdev
= skb
->nf_bridge
->physoutdev
;
131 dev_hold(physoutdev
);
135 afinfo
->saveroute(skb
, entry
);
136 status
= qh
->outfn(entry
, queuenum
);
141 nf_queue_entry_release_refs(entry
);
#ifdef CONFIG_BRIDGE_NETFILTER
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_push(skb, skb->network_header - skb->mac_header);
}

/* Undo nf_bridge_adjust_skb_data(): restore skb->data to the network
 * header after segmentation. */
static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
/* No bridge netfilter: the adjustments are no-ops. */
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif
175 int nf_queue(struct sk_buff
*skb
,
176 struct nf_hook_ops
*elem
,
177 u_int8_t pf
, unsigned int hook
,
178 struct net_device
*indev
,
179 struct net_device
*outdev
,
180 int (*okfn
)(struct sk_buff
*),
181 unsigned int queuenum
)
183 struct sk_buff
*segs
;
187 if (!skb_is_gso(skb
))
188 return __nf_queue(skb
, elem
, pf
, hook
, indev
, outdev
, okfn
,
193 skb
->protocol
= htons(ETH_P_IP
);
196 skb
->protocol
= htons(ETH_P_IPV6
);
200 nf_bridge_adjust_skb_data(skb
);
201 segs
= skb_gso_segment(skb
, 0);
202 /* Does not use PTR_ERR to limit the number of error codes that can be
203 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
204 * 'ignore this hook'.
211 struct sk_buff
*nskb
= segs
->next
;
215 nf_bridge_adjust_segmented_data(segs
);
216 err
= __nf_queue(segs
, elem
, pf
, hook
, indev
,
217 outdev
, okfn
, queuenum
);
231 nf_bridge_adjust_segmented_data(skb
);
235 void nf_reinject(struct nf_queue_entry
*entry
, unsigned int verdict
)
237 struct sk_buff
*skb
= entry
->skb
;
238 struct nf_hook_ops
*elem
= entry
->elem
;
239 const struct nf_afinfo
*afinfo
;
244 nf_queue_entry_release_refs(entry
);
246 /* Continue traversal iff userspace said ok... */
247 if (verdict
== NF_REPEAT
) {
248 elem
= list_entry(elem
->list
.prev
, struct nf_hook_ops
, list
);
252 if (verdict
== NF_ACCEPT
) {
253 afinfo
= nf_get_afinfo(entry
->pf
);
254 if (!afinfo
|| afinfo
->reroute(skb
, entry
) < 0)
258 if (verdict
== NF_ACCEPT
) {
260 verdict
= nf_iterate(&nf_hooks
[entry
->pf
][entry
->hook
],
262 entry
->indev
, entry
->outdev
, &elem
,
263 entry
->okfn
, INT_MIN
);
266 switch (verdict
& NF_VERDICT_MASK
) {
274 err
= __nf_queue(skb
, elem
, entry
->pf
, entry
->hook
,
275 entry
->indev
, entry
->outdev
, entry
->okfn
,
276 verdict
>> NF_VERDICT_QBITS
);
278 if (err
== -ECANCELED
)
281 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
294 EXPORT_SYMBOL(nf_reinject
);