/* net/netfilter/nf_conntrack_core.c */
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"
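/* Assigned at runtime by the NAT module: lets ctnetlink parse NAT setup
 * attributes without a hard dependency on the NAT code. */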
int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
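/* Tuple-to-bucket mapping: jhash2 over the source part and destination
 * address (a whole number of u32 words), with the remaining three bytes
 * (destination port and protocol number) folded into the initval along
 * with the zone and the per-boot random seed.  The result is scaled to
 * the table size with a 64-bit multiply rather than an expensive modulo. */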
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  u16 zone, unsigned int size, unsigned int rnd)
{
	unsigned int n;
	u_int32_t h;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	h = jhash2((u32 *)tuple, n,
		   zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
				 tuple->dst.protonum));

	return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
				       const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, zone, net->ct.htable_size,
				nf_conntrack_hash_rnd);
}
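/* Extract the original-direction tuple from a packet, using the given
 * l3/l4 protocol handlers to pull out the addresses and port/id fields.
 * Returns false if the packet cannot be parsed. */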
bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
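/* Build the tuple that a reply packet would have: the protocol handlers
 * swap the address and port fields, and the direction is flipped. */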
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before the connection is in the list, so we need to clean
	 * here, too. */
	nf_ct_remove_expectations(ct);

	/* We overload the first tuple to link into the unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside the lock so preempt is disabled on the module removal
	 * path.  Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
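/* Timer callback used while a conntrack sits on the dying list: retry
 * delivery of the destroy event until a listener takes it, then unhash
 * and drop the entry for good. */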
static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
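/* Normal expiry path.  If the destroy event cannot be delivered, the
 * conntrack is parked on the dying list and death_by_event() retries
 * delivery later instead of losing the event. */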
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}

/*
 * Warning:
 * - Caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != hash)
		goto begin;
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = __nf_conntrack_find(net, zone, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
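/* Note on the lookup above: the conntrack cache is created with
 * SLAB_DESTROY_BY_RCU, so an entry may be freed and reused for another
 * connection while we walk the chain.  That is why the reference is
 * taken with atomic_inc_not_zero() and the tuple and zone are re-checked
 * afterwards, restarting the lookup on a mismatch. */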
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, repl_hash;
	u16 zone;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in the other direction.  The actual packet
	   which created the connection will be IP_CT_NEW or, for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in the hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost the race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
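/* Under table pressure, early_drop() examines at most this many entries,
 * continuing into the following buckets if needed, looking for a
 * connection that is not yet assured and can be evicted. */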
#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
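/* Allocate a conntrack for the given tuple pair.  If the table would
 * exceed nf_conntrack_max, try to evict an entry near the would-be
 * bucket first; failing that, give up with -ENOMEM.  Note that the
 * refcount is set to 1 only after the lookup keys are written, so a
 * concurrent SLAB_DESTROY_BY_RCU lookup never sees a live refcount on a
 * half-initialised object. */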
struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd,
				 sizeof(nf_conntrack_hash_rnd));
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		unsigned int hash = hash_conntrack(net, zone, orig);
		if (!early_drop(net, hash)) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_DESTROY_BY_RCU.
	 */
	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
	 */
	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
	ct->ct_net = net;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (zone) {
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
		if (!nf_ct_zone)
			goto out_free;
		nf_ct_zone->id = zone;
	}
#endif
	/*
	 * changes to lookup keys must be done before setting refcnt to 1
	 */
	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
	return ERR_PTR(-ENOMEM);
#endif
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
	if (IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, zone, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
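/* Main conntrack hook, called for every packet: resolve (or create) the
 * conntrack for the skb, run the l4 protocol state machine on it and
 * cache the resulting events for delivery. */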
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	if (skb->nfct) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		tmpl = (struct nf_conn *)skb->nfct;
		if (!nf_ct_is_template(tmpl)) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->nfct = NULL;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * the inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
				     pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
	}

	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = -ret;
		goto out;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl)
		nf_ct_put(tmpl);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout.  Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
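/* Kill a conntrack right away, optionally accounting the final skb.
 * Returns true if the timeout timer was still pending, i.e. this call is
 * the one that killed it. */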
bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike.  This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

	/* If we fail to deliver the event, death_by_timeout() will retry */
	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 1;

	/* Avoid the delivery of the destroy event in death_by_timeout(). */
	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid	= pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}

static void nf_conntrack_cleanup_init_net(void)
{
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}

static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}

	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     net->ct.htable_size);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
	kfree(net->ct.slabname);
	free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		rcu_assign_pointer(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}
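/* Allocate a hash table of the requested size, rounded up to whole
 * pages, preferring plain pages and falling back to vmalloc.  With
 * @nulls set, each chain is terminated with its bucket number as the
 * nulls marker so lockless lookups can detect having been moved to
 * another chain. */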
void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	*vmalloced = 0;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
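/* Handler for the "hashsize" module parameter (see module_param_call
 * below): resize the init_net hash table by allocating a new one and
 * rehashing every entry under nf_conntrack_lock.  Other network
 * namespaces keep their existing table size. */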
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
	if (!hash)
		return -ENOMEM;

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize,
						  nf_conntrack_hash_rnd);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_vmalloced = init_net.ct.hash_vmalloc;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash_vmalloc = vmalloced;
	init_net.ct.hash = hash;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: a 32MB
	 * machine has 512 buckets.  >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads.  When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	/* Set up fake conntrack: to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
	nf_conntrack_untracked.ct_net = &init_net;
#endif
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/* - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
	nf_conntrack_helper_fini();
#endif
err_helper:
	nf_conntrack_proto_fini();
err_proto:
	return ret;
}
/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname) {
		ret = -ENOMEM;
		goto err_slabname;
	}

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
					     &net->ct.hash_vmalloc, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;

	return 0;

err_ecache:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

		/* How to get NAT offsets */
		rcu_assign_pointer(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}