#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>

#include "nf_internals.h"
/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * queue_handler_lock below.  The handler must provide an outfn() to accept
 * packets for queueing and must reinject all packets it receives, no matter
 * what.
 */
static struct nf_queue_handler *queue_handler[NPROTO];
static DEFINE_RWLOCK(queue_handler_lock);
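/*
 * Illustrative sketch (not part of the original file): a queue handler is a
 * struct nf_queue_handler whose outfn() takes ownership of the skb and must
 * eventually hand it back through nf_reinject().  The names sample_outfn,
 * sample_enqueue and sample_handler below are hypothetical; only the outfn()
 * signature and the .name/.outfn members are taken from how this file uses
 * the structure.  sample_enqueue() stands for the handler's private
 * queue-to-userspace logic.
 *
 *        static int sample_outfn(struct sk_buff *skb, struct nf_info *info,
 *                                unsigned int queuenum, void *data)
 *        {
 *                return sample_enqueue(skb, info, queuenum);
 *        }
 *
 *        static struct nf_queue_handler sample_handler = {
 *                .name   = "sample",
 *                .outfn  = sample_outfn,
 *        };
 */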
/* Return -EBUSY when somebody else is registered, -EEXIST if the same
 * handler is already registered, and 0 on success. */
int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
{
        int ret;

        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        if (queue_handler[pf] == qh)
                ret = -EEXIST;
        else if (queue_handler[pf])
                ret = -EBUSY;
        else {
                queue_handler[pf] = qh;
                ret = 0;
        }
        write_unlock_bh(&queue_handler_lock);

        return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
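/*
 * Usage sketch, assuming the hypothetical sample_handler above: a handler
 * module would normally register from its init path and distinguish -EBUSY
 * (another handler owns this protocol family) from -EEXIST (we are already
 * registered).
 *
 *        err = nf_register_queue_handler(PF_INET, &sample_handler);
 *        if (err < 0 && err != -EEXIST)
 *                return err;
 */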
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
        if (pf >= NPROTO)
                return -EINVAL;

        write_lock_bh(&queue_handler_lock);
        queue_handler[pf] = NULL;
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
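/*
 * Teardown sketch for the hypothetical handler above, following the comment
 * on nf_unregister_queue_handler(): reinject (or drop) whatever is still
 * sitting in the handler's private queue first, then unregister.
 * sample_flush_queue() stands for that handler-private flush.
 *
 *        sample_flush_queue();
 *        nf_unregister_queue_handler(PF_INET);
 */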
void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
{
        int pf;

        write_lock_bh(&queue_handler_lock);
        for (pf = 0; pf < NPROTO; pf++) {
                if (queue_handler[pf] == qh)
                        queue_handler[pf] = NULL;
        }
        write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
                      struct list_head *elem,
                      int pf, unsigned int hook,
                      struct net_device *indev,
                      struct net_device *outdev,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
{
        int status;
        struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev = NULL;
        struct net_device *physoutdev = NULL;
#endif
        struct nf_afinfo *afinfo;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        read_lock(&queue_handler_lock);
        if (!queue_handler[pf]) {
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        afinfo = nf_get_afinfo(pf);
        if (!afinfo) {
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        info = kmalloc(sizeof(*info) + afinfo->route_key_size, GFP_ATOMIC);
        if (!info) {
                if (net_ratelimit())
                        printk(KERN_ERR "OOM queueing packet %p\n",
                               skb);
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        *info = (struct nf_info) {
                (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
        /* If it's going away, ignore hook. */
        if (!try_module_get(info->elem->owner)) {
                read_unlock(&queue_handler_lock);
                kfree(info);
                return 0;
        }

        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
        if (outdev)
                dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                physindev = skb->nf_bridge->physindev;
                if (physindev)
                        dev_hold(physindev);
                physoutdev = skb->nf_bridge->physoutdev;
                if (physoutdev)
                        dev_hold(physoutdev);
        }
#endif
        afinfo->saveroute(skb, info);
        status = queue_handler[pf]->outfn(skb, info, queuenum,
                                          queue_handler[pf]->data);

        read_unlock(&queue_handler_lock);
        if (status < 0) {
                /* Queueing failed: release everything we grabbed above
                 * and drop the packet. */
                if (indev)
                        dev_put(indev);
                if (outdev)
                        dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
                if (physindev)
                        dev_put(physindev);
                if (physoutdev)
                        dev_put(physoutdev);
#endif
                module_put(info->elem->owner);
                kfree(info);
                kfree_skb(skb);

                return 1;
        }

        return 1;
}
int nf_queue(struct sk_buff *skb,
             struct list_head *elem,
             int pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
             int (*okfn)(struct sk_buff *),
             unsigned int queuenum)
{
        struct sk_buff *segs;

        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                                  queuenum);

        switch (pf) {
        case AF_INET:
                skb->protocol = htons(ETH_P_IP);
                break;
        case AF_INET6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        }

        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
        if (unlikely(IS_ERR(segs)))
                return 1;

        do {
                struct sk_buff *nskb = segs->next;

                segs->next = NULL;
                if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
                                queuenum))
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);
        return 1;
}
void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                 unsigned int verdict)
{
        struct list_head *elem = &info->elem->list;
        struct list_head *i;
        struct nf_afinfo *afinfo;

        rcu_read_lock();
        /* Release the device references we took in __nf_queue(). */
        if (info->indev)
                dev_put(info->indev);
        if (info->outdev)
                dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                if (skb->nf_bridge->physindev)
                        dev_put(skb->nf_bridge->physindev);
                if (skb->nf_bridge->physoutdev)
                        dev_put(skb->nf_bridge->physoutdev);
        }
#endif
        /* Drop reference to owner of hook which queued us. */
        module_put(info->elem->owner);

        list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
                if (i == elem)
                        break;
        }

        if (i == &nf_hooks[info->pf][info->hook]) {
                /* The module which sent it to userspace is gone. */
                NFDEBUG("%s: module disappeared, dropping packet.\n",
                        __FUNCTION__);
                verdict = NF_DROP;
        }
        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(info->pf);
                if (!afinfo || afinfo->reroute(&skb, info) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
                                     &skb, info->hook,
                                     info->indev, info->outdev, &elem,
                                     info->okfn, INT_MIN);
        }
        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                info->okfn(skb);
        case NF_STOLEN:
                break;
        case NF_QUEUE:
                if (!__nf_queue(skb, elem, info->pf, info->hook,
                                info->indev, info->outdev, info->okfn,
                                verdict >> NF_VERDICT_BITS))
                        goto next_hook;
                break;
        default:
                kfree_skb(skb);
        }
        rcu_read_unlock();
        kfree(info);
        return;
}
EXPORT_SYMBOL(nf_reinject);
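/*
 * For reference: a verdict word packs the action into its low
 * NF_VERDICT_BITS and, for NF_QUEUE, the target queue number into the high
 * bits, which is what the "verdict >> NF_VERDICT_BITS" above extracts.  A
 * hook function asking for its packets to be delivered to queue 3 would
 * therefore return something equivalent to:
 *
 *        return NF_QUEUE | (3 << NF_VERDICT_BITS);
 */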
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos >= NPROTO)
                return NULL;

        return pos;
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;

        if (*pos >= NPROTO)
                return NULL;

        return pos;
}
static void seq_stop(struct seq_file *s, void *v)
{
}
static int seq_show(struct seq_file *s, void *v)
{
        int ret;
        loff_t *pos = v;
        struct nf_queue_handler *qh;

        read_lock_bh(&queue_handler_lock);
        qh = queue_handler[*pos];
        if (!qh)
                ret = seq_printf(s, "%2lld NONE\n", *pos);
        else
                ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
        read_unlock_bh(&queue_handler_lock);

        return ret;
}
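/*
 * The resulting /proc/net/netfilter/nf_queue file shows one line per
 * protocol family: the family number and either the registered handler's
 * name or NONE.  Example output (abridged; contents depend on which
 * queueing modules are loaded, shown here with the hypothetical
 * sample_handler from above registered for PF_INET, family 2):
 *
 *         0 NONE
 *         1 NONE
 *         2 sample
 *         3 NONE
 */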
static struct seq_operations nfqueue_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};
static int nfqueue_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &nfqueue_seq_ops);
}
static const struct file_operations nfqueue_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqueue_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif /* CONFIG_PROC_FS */
int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *pde;

        pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
        if (!pde)
                return -1;
        pde->proc_fops = &nfqueue_file_ops;
#endif
        return 0;
}