/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

static unsigned int nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
	unsigned int n;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		       tuple->dst.protonum));
}
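/* Map a full 32-bit hash value onto [0, size) with a multiply-and-shift
 * instead of a modulo: cheaper than a division and still uniform as long
 * as the hash itself is uniformly distributed. */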
static u32 __hash_bucket(u32 hash, unsigned int size)
{
	return ((u64)hash * size) >> 32;
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
	return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  u16 zone, unsigned int size)
{
	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
				       const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, zone, net->ct.htable_size);
}
bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
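/* Like nf_ct_get_tuple(), but resolves the l3/l4 protocol handlers from
 * the packet itself given only the layer 3 family; returns false if the
 * packet cannot be turned into a tuple. */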
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}
void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
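/* Timer callback used while a conntrack sits on the dying list because
 * its DESTROY event could not be delivered: retry delivery, rearming the
 * timer with a randomized delay, and only drop the final reference once
 * a listener has seen the event. */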
static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}
/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket = hash_bucket(hash, net);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}
	local_bh_enable();

	return NULL;
}
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	return ____nf_conntrack_find(net, zone, tuple,
				     hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, repl_hash;
	u16 zone;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	/* reuse the hash saved before */
	hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
	hash = hash_bucket(hash, net);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* We have to check the DYING flag inside the lock to prevent
	   a race against nf_ct_get_next_corpse() possibly called from
	   user context, else we insert an already 'dead' hash, blocking
	   further use of that particular connection -JM */

	if (unlikely(nf_ct_is_dying(ct))) {
		spin_unlock_bh(&nf_conntrack_lock);
		return NF_ACCEPT;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
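/* Allocate a conntrack from the SLAB_DESTROY_BY_RCU cache.  Seeds the
 * hash random on first use, enforces nf_conntrack_max by attempting an
 * early_drop() eviction, and returns ERR_PTR(-ENOMEM) on failure. */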
static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		unsigned int rand;

		/*
		 * Why not initialize nf_conntrack_rnd in a "init()" function ?
		 * Because there isn't enough entropy when system initializing,
		 * and we initialize it as late as possible.
		 */
		do {
			get_random_bytes(&rand, sizeof(rand));
		} while (!rand);
		cmpxchg(&nf_conntrack_hash_rnd, 0, rand);

		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
		hash = hash_conntrack_raw(orig, zone);
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash_bucket(hash, net))) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_DESTROY_BY_RCU.
	 */
	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
	 */
	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
	write_pnet(&ct->ct_net, net);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (zone) {
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
		if (!nf_ct_zone)
			goto out_free;
		nf_ct_zone->id = zone;
	}
#endif
	/*
	 * changes to lookup keys must be done before setting refcnt to 1
	 */
	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
	return ERR_PTR(-ENOMEM);
#endif
}
struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, zone, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	hash = hash_conntrack_raw(&tuple, zone);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
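/* Main conntrack hook, called from the netfilter core for every packet:
 * resolves (or creates) the conntrack for the skb and hands it to the
 * per-protocol state machine. */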
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	if (skb->nfct) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		tmpl = (struct nf_conn *)skb->nfct;
		if (!nf_ct_is_template(tmpl)) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->nfct = NULL;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells to the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
				     pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
	}

	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = -ret;
		goto out;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl)
		nf_ct_put(tmpl);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
			spin_unlock_bh(&ct->lock);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}
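/* Walk every conntrack (hashed and unconfirmed), calling iter() on each
 * and killing the entries it selects; used for flushing and unload. */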
void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daises... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

	/* If we fail to deliver the event, death_by_timeout() will retry */
	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 1;

	/* Avoid the delivery of the destroy event in death_by_timeout(). */
	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}
void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid	= pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
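/* Count the references held on the per-cpu untracked conntracks beyond
 * the initial one; cleanup waits for this to drop to zero. */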
static int untrack_refs(void)
{
	int cnt = 0, cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

		cnt += atomic_read(&ct->ct_general.use) - 1;
	}
	return cnt;
}

static void nf_conntrack_cleanup_init_net(void)
{
	while (untrack_refs() > 0)
		schedule();

	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}
static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}

	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     net->ct.htable_size);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
	kfree(net->ct.slabname);
	free_percpu(net->ct.stat);
}
/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		rcu_assign_pointer(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}
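/* Allocate the conntrack hash table from whole pages, falling back to
 * vmalloc, and (if nulls) initialize each bucket as an hlist_nulls head
 * whose nulls value is the bucket index. */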
void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	*vmalloced = 0;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
	if (!hash)
		return -ENOMEM;

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_vmalloced = init_net.ct.hash_vmalloc;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash_vmalloc = vmalloced;
	init_net.ct.hash = hash;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
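/* OR the given status bits into every per-cpu untracked conntrack. */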
void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret, cpu;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	/* Set up fake conntrack: to never be deleted, not in any hashes */
	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}
	/*  - and make it look like a confirmed connection */
	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
	nf_conntrack_helper_fini();
#endif
err_helper:
	nf_conntrack_proto_fini();
err_proto:
	return ret;
}
/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)
static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname) {
		ret = -ENOMEM;
		goto err_slabname;
	}

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
					     &net->ct.hash_vmalloc, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;

	return 0;

err_ecache:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}
s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

		/* Howto get NAT offsets */
		rcu_assign_pointer(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}