/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
        }

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
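
/*
 * Note: the final ((u64)hash * nf_ct_expect_hsize) >> 32 maps the 32-bit
 * jhash value onto a bucket index in [0, nf_ct_expect_hsize) using a
 * multiply-and-shift instead of a modulo, avoiding a division on the
 * packet path.
 */
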
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
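
/*
 * Lookup pattern used above: the hash chain is walked under
 * rcu_read_lock(), and atomic_inc_not_zero() only takes a reference if
 * the expectation's refcount has not already dropped to zero (i.e. it is
 * not being freed), so a dying entry is never handed back to the caller.
 */
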
/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
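
/*
 * expect_clash() therefore returns true when the parts of the two tuples
 * that both masks care about are identical, i.e. the two expectations
 * could match the same incoming packet.
 */
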
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
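
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * connection-tracking helper is expected to use the API above, modelled
 * on what protocol helpers such as the FTP helper do. The function name,
 * the use of IPPROTO_TCP, the port variable and the choice of tuple
 * fields are placeholders standing in for whatever the helper parsed out
 * of the control connection.
 */
#if 0	/* example only, never compiled */
static int example_helper_expect(struct nf_conn *ct, __be16 port)
{
        struct nf_conntrack_expect *exp;
        int ret;

        /* Allocate an expectation owned by the master connection. */
        exp = nf_ct_expect_alloc(ct);
        if (exp == NULL)
                return -ENOMEM;

        /* Expect a TCP connection towards the original destination
         * address on the announced port; source address and port are
         * left wildcarded (NULL/NULL). */
        nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
                          nf_ct_l3num(ct),
                          NULL,
                          &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
                          IPPROTO_TCP, NULL, &port);

        /* Insert it; the insert path takes its own references (hash and
         * timer), so the helper always drops its initial reference. */
        ret = nf_ct_expect_related(exp);
        nf_ct_expect_put(exp);
        return ret;
}
#endif
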
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
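
/*
 * The last reference is released through call_rcu() rather than by
 * freeing immediately: lockless readers such as __nf_ct_expect_find()
 * may still be walking the hash chain under rcu_read_lock(), so the
 * object must stay valid until a grace period has elapsed.
 */
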
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                                master_help->helper,
                                lockdep_is_held(&nf_conntrack_lock)
                                )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &rcu_dereference_protected(
                master_help->helper,
                lockdep_is_held(&nf_conntrack_lock)
                )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || (master_help && !master_help->helper))) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore.. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}
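
/*
 * For reference (not from the original file), a /proc/net/nf_conntrack_expect
 * line produced by the function above looks roughly like:
 *
 *   297 l3proto = 2 proto=6 src=0.0.0.0 dst=192.0.2.10 sport=0 dport=35715 ftp
 *
 * i.e. remaining timeout in seconds, the layer-3/4 protocol numbers, the
 * masked tuple as printed by print_tuple(), any flag words, and the helper
 * name. The exact tuple text depends on the l3/l4 protocol print handlers.
 */
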
static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next  = exp_seq_next,
        .stop  = exp_seq_stop,
        .show  = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}