net/netfilter/nf_conntrack_expect.c

/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 pid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
		master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

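/*
 * Hash an expectation by the fixed (destination) part of its tuple.
 * Note the return expression below: multiplying the 32-bit jhash value
 * by the table size and keeping the top 32 bits maps the hash uniformly
 * onto [0, nf_ct_expect_hsize) without a modulo operation.
 */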
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	struct hlist_node *n;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

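/*
 * Exact identity, as opposed to a clash: two expectations match only if
 * master, class, tuple, mask and zone are all equal. __nf_ct_expect_check()
 * uses this to spot a duplicate registration whose timer can simply be
 * refreshed instead of rejecting it as a clash.
 */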
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

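/*
 * Typical helper usage (a minimal sketch, loosely following what helpers
 * such as nf_conntrack_ftp do; `ct' and `port' stand in for the caller's
 * conntrack entry and a port parsed from the packet payload):
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	ret = nf_ct_expect_related(exp);
 *	nf_ct_expect_put(exp);
 */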
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

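/*
 * Expectations are freed via RCU so that lockless readers walking the
 * expectation hash under rcu_read_lock() (see __nf_ct_expect_find above)
 * never see a freed object.
 */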
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);
	const struct nf_conntrack_expect_policy *p;
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references: one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	if (master_help) {
		hlist_add_head(&exp->lnode, &master_help->expectations);
		master_help->expecting[exp->class]++;
	} else if (exp->flags & NF_CT_EXPECT_USERSPACE)
		hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	if (master_help) {
		p = &rcu_dereference_protected(
			master_help->helper,
			lockdep_is_held(&nf_conntrack_lock)
			)->expect_policy[exp->class];
		exp->timeout.expires = jiffies + p->timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;
	struct hlist_node *n;

	hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

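/*
 * Re-arm the timeout of an existing expectation. Returns 0 if the timer
 * could not be stopped (the expectation is already dying), 1 after the
 * timer has been re-armed. Must be called with nf_conntrack_lock held,
 * as the lockdep annotation below documents.
 */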
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);
	const struct nf_conntrack_expect_policy *p;

	if (!del_timer(&i->timeout))
		return 0;

	p = &rcu_dereference_protected(
		master_help->helper,
		lockdep_is_held(&nf_conntrack_lock)
		)->expect_policy[i->class];
	i->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}

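/*
 * Validate a new expectation against existing state. Return convention
 * (consumed by nf_ct_expect_related_report() below): 1 means the caller
 * should go ahead and insert, 0 means an identical expectation already
 * existed and its timer was refreshed, negative values are errors.
 */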
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *n;
	unsigned int h;
	int ret = 1;

	/* Don't allow expectations created from kernel-space with no helper */
	if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
	    (!master_help || !master_help->helper)) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	if (master_help) {
		p = &rcu_dereference_protected(
			master_help->helper,
			lockdep_is_held(&nf_conntrack_lock)
			)->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 pid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = 0;
	nf_ct_expect_insert(expect);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	hlist_for_each_entry_safe(exp, n, next,
				  &nf_ct_userspace_expect_list, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

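/*
 * /proc/net/nf_conntrack_expect: one line per pending expectation
 * (remaining timeout, l3/l4 protocol numbers, expected tuple, flags and
 * the helper that created it), implemented with the seq_file API over an
 * RCU-protected walk of the expectation hash.
 */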
#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

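/*
 * The hash table size is settable only at boot or module load time (the
 * 0400 permission makes it read-only at runtime); nf_ct_expect_max then
 * defaults to four times the table size in nf_conntrack_expect_init()
 * below.
 */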
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_init(struct net *net)
{
	int err = -ENOMEM;

	if (net_eq(net, &init_net)) {
		if (!nf_ct_expect_hsize) {
			nf_ct_expect_hsize = net->ct.htable_size / 256;
			if (!nf_ct_expect_hsize)
				nf_ct_expect_hsize = 1;
		}
		nf_ct_expect_max = nf_ct_expect_hsize * 4;
	}

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	if (net_eq(net, &init_net)) {
		nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
		if (!nf_ct_expect_cachep)
			goto err2;
	}

	err = exp_proc_init(net);
	if (err < 0)
		goto err3;

	return 0;

err3:
	if (net_eq(net, &init_net))
		kmem_cache_destroy(nf_ct_expect_cachep);
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
	exp_proc_remove(net);
	if (net_eq(net, &init_net)) {
		rcu_barrier(); /* Wait for call_rcu() before destroy */
		kmem_cache_destroy(nf_ct_expect_cachep);
	}
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}