/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000: Added NF_REPEAT --RR.
 * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/route.h>
#include <linux/ip.h>
/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
#ifdef CONFIG_NETFILTER_DEBUG
#define NFDEBUG(format, args...)  printk(format , ## args)
#else
#define NFDEBUG(format, args...)
#endif
/* Sockopts only registered and called from user context, so
   net locking would be overkill.  Also, [gs]etsockopt calls may
   sleep. */
static DECLARE_MUTEX(nf_sockopt_mutex);

struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
static LIST_HEAD(nf_sockopts);
static DEFINE_SPINLOCK(nf_hook_lock);
/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler_t {
	nf_queue_outfn_t outfn;
	void *data;
} queue_handler[NPROTO];
static DEFINE_RWLOCK(queue_handler_lock);
int nf_register_hook(struct nf_hook_ops *reg)
{
	struct list_head *i;

	spin_lock_bh(&nf_hook_lock);
	list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
		if (reg->priority < ((struct nf_hook_ops *)i)->priority)
			break;
	}
	list_add_rcu(&reg->list, i->prev);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
	return 0;
}
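
/*
 * Usage sketch (illustrative only, not part of the original file).  The
 * hook prototype is inferred from the elem->hook() call in nf_iterate()
 * below; the names "my_hook" and "my_ops" are hypothetical:
 *
 *	static unsigned int my_hook(unsigned int hooknum,
 *				    struct sk_buff **pskb,
 *				    const struct net_device *in,
 *				    const struct net_device *out,
 *				    int (*okfn)(struct sk_buff *))
 *	{
 *		return NF_ACCEPT;	-- let the packet continue
 *	}
 *
 *	static struct nf_hook_ops my_ops = {
 *		.hook		= my_hook,
 *		.owner		= THIS_MODULE,
 *		.pf		= PF_INET,
 *		.hooknum	= NF_IP_PRE_ROUTING,
 *		.priority	= 0,
 *	};
 *
 *	nf_register_hook(&my_ops);	-- sorted into the list by .priority
 */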

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	spin_lock_bh(&nf_hook_lock);
	list_del_rcu(&reg->list);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
}

/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
	return max1 > min2 && min1 < max2;
}
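
/*
 * Worked example (illustrative): with exclusive upper bounds,
 * overlap(64, 70, 65, 75) is true (70 > 65 && 64 < 75; they share
 * 65..69), while merely adjacent ranges do not overlap:
 * overlap(64, 70, 70, 80) is false since 70 > 70 fails.
 */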

/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
	struct list_head *i;
	int ret = 0;

	if (down_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == reg->pf
		    && (overlap(ops->set_optmin, ops->set_optmax,
				reg->set_optmin, reg->set_optmax)
			|| overlap(ops->get_optmin, ops->get_optmax,
				   reg->get_optmin, reg->get_optmax))) {
			NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
				ops->set_optmin, ops->set_optmax,
				ops->get_optmin, ops->get_optmax,
				reg->set_optmin, reg->set_optmax,
				reg->get_optmin, reg->get_optmax);
			ret = -EBUSY;
			goto out;
		}
	}

	list_add(&reg->list, &nf_sockopts);
out:
	up(&nf_sockopt_mutex);
	return ret;
}
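
/*
 * Usage sketch (illustrative only).  The field names match how they are
 * read elsewhere in this file; the option numbers and the "my_*" names
 * are hypothetical, and the set()/get() prototypes can be read off the
 * ops->set()/ops->get() calls in nf_sockopt() below:
 *
 *	static struct nf_sockopt_ops my_sockopts = {
 *		.pf		= PF_INET,
 *		.set_optmin	= 64,	-- range is [optmin, optmax)
 *		.set_optmax	= 70,
 *		.set		= my_set,
 *		.get_optmin	= 64,
 *		.get_optmax	= 70,
 *		.get		= my_get,
 *	};
 *
 *	nf_register_sockopt(&my_sockopts);  -- -EBUSY if ranges overlap
 */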

void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
	/* No point being interruptible: we're probably in cleanup_module() */
restart:
	down(&nf_sockopt_mutex);
	if (reg->use != 0) {
		/* To be woken by nf_sockopt call... */
		/* FIXME: Stuart Young's name appears gratuitously. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		reg->cleanup_task = current;
		up(&nf_sockopt_mutex);
		schedule();
		goto restart;
	}
	list_del(&reg->list);
	up(&nf_sockopt_mutex);
}

#ifdef CONFIG_NETFILTER_DEBUG
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv4.h>

static void debug_print_hooks_ip(unsigned int nf_debug)
{
	if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
		printk("PRE_ROUTING ");
		nf_debug ^= (1 << NF_IP_PRE_ROUTING);
	}
	if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
		printk("LOCAL_IN ");
		nf_debug ^= (1 << NF_IP_LOCAL_IN);
	}
	if (nf_debug & (1 << NF_IP_FORWARD)) {
		printk("FORWARD ");
		nf_debug ^= (1 << NF_IP_FORWARD);
	}
	if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
		printk("LOCAL_OUT ");
		nf_debug ^= (1 << NF_IP_LOCAL_OUT);
	}
	if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
		printk("POST_ROUTING ");
		nf_debug ^= (1 << NF_IP_POST_ROUTING);
	}
	if (nf_debug)
		printk("Crap bits: 0x%04X", nf_debug);
	printk("\n");
}

static void nf_dump_skb(int pf, struct sk_buff *skb)
{
	printk("skb: pf=%i %s dev=%s len=%u\n",
	       pf,
	       skb->sk ? "(owned)" : "(unowned)",
	       skb->dev ? skb->dev->name : "(no dev)",
	       skb->len);
	switch (pf) {
	case PF_INET: {
		const struct iphdr *ip = skb->nh.iph;
		__u32 *opt = (__u32 *) (ip + 1);
		int opti;
		__u16 src_port = 0, dst_port = 0;

		if (ip->protocol == IPPROTO_TCP
		    || ip->protocol == IPPROTO_UDP) {
			struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
			src_port = ntohs(tcp->source);
			dst_port = ntohs(tcp->dest);
		}

		printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
		       " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
		       ip->protocol, NIPQUAD(ip->saddr),
		       src_port, NIPQUAD(ip->daddr),
		       dst_port,
		       ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
		       ntohs(ip->frag_off), ip->ttl);

		for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
			printk(" O=0x%8.8X", *opt++);
		printk("\n");
	}
	}
}

void nf_debug_ip_local_deliver(struct sk_buff *skb)
{
	/* If it's a loopback packet, it must have come through
	 * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and
	 * NF_IP_LOCAL_IN.  Otherwise, it must have gone through
	 * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */
	if (!skb->dev) {
		printk("ip_local_deliver: skb->dev is NULL.\n");
	} else {
		if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
				      | (1<<NF_IP_LOCAL_IN))) {
			printk("ip_local_deliver: bad skb: ");
			debug_print_hooks_ip(skb->nf_debug);
			nf_dump_skb(PF_INET, skb);
		}
	}
}

void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
{
	if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
				 | (1 << NF_IP_POST_ROUTING))) {
		printk("ip_dev_loopback_xmit: bad owned skb = %p: ",
		       newskb);
		debug_print_hooks_ip(newskb->nf_debug);
		nf_dump_skb(PF_INET, newskb);
	}
}

void nf_debug_ip_finish_output2(struct sk_buff *skb)
{
	/* If it's owned, it must have gone through NF_IP_LOCAL_OUT
	 * and NF_IP_POST_ROUTING.  Otherwise, it must have gone
	 * through NF_IP_PRE_ROUTING, NF_IP_FORWARD and
	 * NF_IP_POST_ROUTING. */
	if (skb->sk) {
		if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
				      | (1 << NF_IP_POST_ROUTING))) {
			printk("ip_finish_output: bad owned skb = %p: ", skb);
			debug_print_hooks_ip(skb->nf_debug);
			nf_dump_skb(PF_INET, skb);
		}
	} else {
		if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
				      | (1 << NF_IP_FORWARD)
				      | (1 << NF_IP_POST_ROUTING))) {
			/* Fragments, entunnelled packets, TCP RSTs
			   generated by ipt_REJECT will have no
			   owners, but still may be local */
			if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
					      | (1 << NF_IP_POST_ROUTING))) {
				printk("ip_finish_output:"
				       " bad unowned skb = %p: ", skb);
				debug_print_hooks_ip(skb->nf_debug);
				nf_dump_skb(PF_INET, skb);
			}
		}
	}
}
#endif /*CONFIG_NETFILTER_DEBUG*/

/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, int pf, int val,
		      char __user *opt, int *len, int get)
{
	struct list_head *i;
	struct nf_sockopt_ops *ops;
	int ret;

	if (down_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == pf) {
			if (get) {
				if (val >= ops->get_optmin
				    && val < ops->get_optmax) {
					ops->use++;
					up(&nf_sockopt_mutex);
					ret = ops->get(sk, val, opt, len);
					goto out;
				}
			} else {
				if (val >= ops->set_optmin
				    && val < ops->set_optmax) {
					ops->use++;
					up(&nf_sockopt_mutex);
					ret = ops->set(sk, val, opt, *len);
					goto out;
				}
			}
		}
	}
	up(&nf_sockopt_mutex);
	return -ENOPROTOOPT;

out:
	down(&nf_sockopt_mutex);
	ops->use--;
	if (ops->cleanup_task)
		wake_up_process(ops->cleanup_task);
	up(&nf_sockopt_mutex);
	return ret;
}

int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
		  int len)
{
	return nf_sockopt(sk, pf, val, opt, &len, 0);
}

int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
{
	return nf_sockopt(sk, pf, val, opt, len, 1);
}
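
/*
 * Call-site sketch (illustrative, not from this file): a protocol's
 * [gs]etsockopt() implementation typically falls back to netfilter for
 * option numbers it does not handle itself, roughly:
 *
 *	-- inside a hypothetical PF_INET setsockopt() handler:
 *	if (optname is not a core IP option)
 *		return nf_setsockopt(sk, PF_INET, optname, optval, optlen);
 *
 * nf_sockopt() then dispatches on the registered [optmin, optmax)
 * ranges, returning -ENOPROTOOPT if no range matches.
 */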

static unsigned int nf_iterate(struct list_head *head,
			       struct sk_buff **skb,
			       int hook,
			       const struct net_device *indev,
			       const struct net_device *outdev,
			       struct list_head **i,
			       int (*okfn)(struct sk_buff *),
			       int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely(verdict > NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}

int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
{
	int ret;

	write_lock_bh(&queue_handler_lock);
	if (queue_handler[pf].outfn)
		ret = -EBUSY;
	else {
		queue_handler[pf].outfn = outfn;
		queue_handler[pf].data = data;
		ret = 0;
	}
	write_unlock_bh(&queue_handler_lock);

	return ret;
}
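
/*
 * Usage sketch (illustrative only).  The outfn prototype is inferred
 * from the queue_handler[pf].outfn(...) call in nf_queue() below;
 * "my_outfn" is a hypothetical name:
 *
 *	static int my_outfn(struct sk_buff *skb, struct nf_info *info,
 *			    void *data)
 *	{
 *		-- hand skb to userspace; every packet accepted here
 *		-- MUST eventually come back via
 *		-- nf_reinject(skb, info, verdict)
 *		return 0;	-- negative means "rejected, drop it"
 *	}
 *
 *	nf_register_queue_handler(PF_INET, my_outfn, NULL);
 */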

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
	write_lock_bh(&queue_handler_lock);
	queue_handler[pf].outfn = NULL;
	queue_handler[pf].data = NULL;
	write_unlock_bh(&queue_handler_lock);

	return 0;
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int nf_queue(struct sk_buff *skb,
		    struct list_head *elem,
		    int pf, unsigned int hook,
		    struct net_device *indev,
		    struct net_device *outdev,
		    int (*okfn)(struct sk_buff *))
{
	int status;
	struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev = NULL;
	struct net_device *physoutdev = NULL;
#endif

	/* QUEUE == DROP if no one is waiting, to be safe. */
	read_lock(&queue_handler_lock);
	if (!queue_handler[pf].outfn) {
		read_unlock(&queue_handler_lock);
		kfree_skb(skb);
		return 1;
	}

	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (!info) {
		if (net_ratelimit())
			printk(KERN_ERR "OOM queueing packet %p\n",
			       skb);
		read_unlock(&queue_handler_lock);
		kfree_skb(skb);
		return 1;
	}

	*info = (struct nf_info) {
		(struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

	/* If it's going away, ignore hook. */
	if (!try_module_get(info->elem->owner)) {
		read_unlock(&queue_handler_lock);
		kfree(info);
		return 0;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev) dev_hold(indev);
	if (outdev) dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev) dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev) dev_hold(physoutdev);
	}
#endif

	status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
	read_unlock(&queue_handler_lock);

	if (status < 0) {
		/* Queue handler rejected the packet: undo the
		   refcounts we took above and drop it. */
		if (indev) dev_put(indev);
		if (outdev) dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
		if (physindev) dev_put(physindev);
		if (physoutdev) dev_put(physoutdev);
#endif
		module_put(info->elem->owner);
		kfree(info);
		kfree_skb(skb);
		return 1;
	}
	return 1;
}

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct list_head *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

#ifdef CONFIG_NETFILTER_DEBUG
	if (unlikely((*pskb)->nf_debug & (1 << hook))) {
		printk("nf_hook: hook %i already set.\n", hook);
		nf_dump_skb(pf, *pskb);
	}
	(*pskb)->nf_debug |= (1 << hook);
#endif

	elem = &nf_hooks[pf][hook];
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
		goto unlock;
	} else if (verdict == NF_DROP) {
		kfree_skb(*pskb);
		ret = -EPERM;
	} else if (verdict == NF_QUEUE) {
		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
			goto next_hook;
	}
unlock:
	rcu_read_unlock();
	return ret;
}
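
/*
 * Caller sketch (illustrative only): nf_hook_slow() is normally reached
 * via the NF_HOOK() macro in <linux/netfilter.h> rather than called
 * directly.  A caller honouring the return convention above would do
 * roughly the following ("out_dev" is a hypothetical variable):
 *
 *	ret = nf_hook_slow(PF_INET, NF_IP_LOCAL_OUT, &skb,
 *			   NULL, out_dev, dst_output, INT_MIN);
 *	if (ret == 1)
 *		ret = dst_output(skb);	-- ACCEPT/STOP: caller runs okfn
 *	-- ret == -EPERM: packet was dropped (and already freed);
 *	-- ret == 0: packet was queued or stolen, nothing more to do.
 */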

void nf_reinject(struct sk_buff *skb, struct nf_info *info,
		 unsigned int verdict)
{
	struct list_head *elem = &info->elem->list;
	struct list_head *i;

	rcu_read_lock();

	/* Release those devices we held, or Alexey will kill me. */
	if (info->indev) dev_put(info->indev);
	if (info->outdev) dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		if (skb->nf_bridge->physindev)
			dev_put(skb->nf_bridge->physindev);
		if (skb->nf_bridge->physoutdev)
			dev_put(skb->nf_bridge->physoutdev);
	}
#endif

	/* Drop reference to owner of hook which queued us. */
	module_put(info->elem->owner);

	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
		if (i == elem)
			break;
	}

	if (i == &nf_hooks[info->pf][info->hook]) {
		/* The module which sent it to userspace is gone. */
		NFDEBUG("%s: module disappeared, dropping packet.\n",
			__FUNCTION__);
		verdict = NF_DROP;
	}

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
				     &skb, info->hook,
				     info->indev, info->outdev, &elem,
				     info->okfn, INT_MIN);
	}

	switch (verdict) {
	case NF_ACCEPT:
		info->okfn(skb);
		break;

	case NF_QUEUE:
		if (!nf_queue(skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn))
			goto next_hook;
		break;
	}
	rcu_read_unlock();

	if (verdict == NF_DROP)
		kfree_skb(skb);

	kfree(info);
	return;
}

#ifdef CONFIG_INET
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct sk_buff **pskb)
{
	struct iphdr *iph = (*pskb)->nh.iph;
	struct rtable *rt;
	struct flowi fl = {};
	struct dst_entry *odst;
	unsigned int hh_len;

	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
	 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
	 */
	if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
		fl.nl_u.ip4_u.daddr = iph->daddr;
		fl.nl_u.ip4_u.saddr = iph->saddr;
		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
		fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
#ifdef CONFIG_IP_ROUTE_FWMARK
		fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
#endif
		fl.proto = iph->protocol;
		if (ip_route_output_key(&rt, &fl) != 0)
			return -1;

		/* Drop old route. */
		dst_release((*pskb)->dst);
		(*pskb)->dst = &rt->u.dst;
	} else {
		/* non-local src, find valid iif to satisfy
		 * rp-filter when calling ip_route_input. */
		fl.nl_u.ip4_u.daddr = iph->saddr;
		if (ip_route_output_key(&rt, &fl) != 0)
			return -1;

		odst = (*pskb)->dst;
		if (ip_route_input(*pskb, iph->daddr, iph->saddr,
				   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
			dst_release(&rt->u.dst);
			return -1;
		}
		dst_release(&rt->u.dst);
		dst_release(odst);
	}

	if ((*pskb)->dst->error)
		return -1;

	/* Change in oif may mean change in hh_len. */
	hh_len = (*pskb)->dst->dev->hard_header_len;
	if (skb_headroom(*pskb) < hh_len) {
		struct sk_buff *nskb;

		nskb = skb_realloc_headroom(*pskb, hh_len);
		if (!nskb)
			return -1;
		if ((*pskb)->sk)
			skb_set_owner_w(nskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = nskb;
	}

	return 0;
}
EXPORT_SYMBOL(ip_route_me_harder);
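
/*
 * Caller sketch (illustrative only): after a target rewrites addresses
 * on LOCAL_OUT, the packet's cached route is stale, so callers such as
 * the NAT code re-route roughly like this ("new_daddr" is hypothetical):
 *
 *	iph->daddr = new_daddr;
 *	if (ip_route_me_harder(pskb) != 0)
 *		return NF_DROP;		-- no valid new route found
 */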

int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
	struct sk_buff *nskb;

	if (writable_len > (*pskb)->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (skb_shared(*pskb) || skb_cloned(*pskb))
		goto copy_skb;

	return pskb_may_pull(*pskb, writable_len);

copy_skb:
	nskb = skb_copy(*pskb, GFP_ATOMIC);
	if (!nskb)
		return 0;
	BUG_ON(skb_is_nonlinear(nskb));

	/* Rest of kernel will get very unhappy if we pass it a
	   suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}
EXPORT_SYMBOL(skb_ip_make_writable);
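
/*
 * Usage sketch (illustrative only): since the function may replace the
 * skb with a private copy, callers must re-fetch any header pointers
 * through *pskb afterwards ("new_tos" is hypothetical):
 *
 *	if (!skb_ip_make_writable(pskb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *	iph = (*pskb)->nh.iph;	-- re-fetch, *pskb may have changed
 *	iph->tos = new_tos;	-- now safe to modify in place
 */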
#endif /*CONFIG_INET*/

/* Internal logging interface, which relies on the real
   LOG target modules */

#define NF_LOG_PREFIXLEN		128

static nf_logfn *nf_logging[NPROTO]; /* = NULL */
static int reported = 0;
static DEFINE_SPINLOCK(nf_log_lock);

int nf_log_register(int pf, nf_logfn *logfn)
{
	int ret = -EBUSY;

	/* Any setup of logging members must be done before
	 * substituting pointer. */
	spin_lock(&nf_log_lock);
	if (!nf_logging[pf]) {
		rcu_assign_pointer(nf_logging[pf], logfn);
		ret = 0;
	}
	spin_unlock(&nf_log_lock);
	return ret;
}
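
/*
 * Usage sketch (illustrative only).  The nf_logfn prototype is inferred
 * from the logfn(hooknum, skb, in, out, prefix) call in nf_log_packet()
 * below; "my_logfn" is a hypothetical name:
 *
 *	static void my_logfn(unsigned int hooknum,
 *			     const struct sk_buff *skb,
 *			     const struct net_device *in,
 *			     const struct net_device *out,
 *			     const char *prefix)
 *	{
 *		printk("%sPROTO=%u\n", prefix, skb->nh.iph->protocol);
 *	}
 *
 *	nf_log_register(PF_INET, my_logfn);  -- -EBUSY if one is set
 */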

void nf_log_unregister(int pf, nf_logfn *logfn)
{
	spin_lock(&nf_log_lock);
	if (nf_logging[pf] == logfn)
		nf_logging[pf] = NULL;
	spin_unlock(&nf_log_lock);

	/* Give time to concurrent readers. */
	synchronize_net();
}

void nf_log_packet(int pf,
		   unsigned int hooknum,
		   const struct sk_buff *skb,
		   const struct net_device *in,
		   const struct net_device *out,
		   const char *fmt, ...)
{
	va_list args;
	char prefix[NF_LOG_PREFIXLEN];
	nf_logfn *logfn;

	rcu_read_lock();
	logfn = rcu_dereference(nf_logging[pf]);
	if (logfn) {
		va_start(args, fmt);
		vsnprintf(prefix, sizeof(prefix), fmt, args);
		va_end(args);
		/* We must read logging before nf_logfn[pf] */
		logfn(hooknum, skb, in, out, prefix);
	} else if (!reported) {
		printk(KERN_WARNING "nf_log_packet: can't log yet, "
		       "no backend logging module loaded in!\n");
		reported++;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_register);
EXPORT_SYMBOL(nf_log_unregister);
EXPORT_SYMBOL(nf_log_packet);

/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be
   in the hash table, and hence manufactured ICMP or RST packets will not
   be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, struct sk_buff *);

	if (skb->nfct && (attach = ip_ct_attach) != NULL) {
		mb(); /* Just to be sure: must be read before executing this */
		attach(new, skb);
	}
}

void __init netfilter_init(void)
{
	int i, h;

	for (i = 0; i < NPROTO; i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&nf_hooks[i][h]);
	}
}

EXPORT_SYMBOL(ip_ct_attach);
EXPORT_SYMBOL(nf_ct_attach);
EXPORT_SYMBOL(nf_getsockopt);
EXPORT_SYMBOL(nf_hook_slow);
EXPORT_SYMBOL(nf_hooks);
EXPORT_SYMBOL(nf_register_hook);
EXPORT_SYMBOL(nf_register_queue_handler);
EXPORT_SYMBOL(nf_register_sockopt);
EXPORT_SYMBOL(nf_reinject);
EXPORT_SYMBOL(nf_setsockopt);
EXPORT_SYMBOL(nf_unregister_hook);
EXPORT_SYMBOL(nf_unregister_queue_handler);
EXPORT_SYMBOL(nf_unregister_sockopt);