/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

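/* Hash the destination part of a tuple into the expectation table; the
 * once-initialized hashrnd plus the per-netns hash mix keep bucket
 * placement unpredictable across namespaces.
 */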
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash, seed;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ seed);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

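/* Remove an expectation whose timeout has not fired yet: unlink it and drop
 * the reference held by the timer.  Returns true if this call removed it.
 */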
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (i.e. the packet hasn't
         * left this machine yet), how can the other end know about the
         * expectation?  Hence these are not the droids you are looking for
         * (if the master ct never got confirmed, we'd hold a reference to it
         * and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with other CPUs that, for the exp->master ct, are
         * about to invoke ->destroy(), or nf_ct_delete() via timeout
         * or early_drop().
         *
         * If atomic_inc_not_zero() fails, we know that the ct is being
         * destroyed.  If it succeeds, we can be sure the ct cannot
         * disappear underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo the exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                nf_ct_remove_expect(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_remove_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        refcount_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

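/* Fill in an expectation's tuple and mask from the given family, addresses,
 * protocol and ports.  A NULL saddr or src acts as a wildcard; unused tail
 * bytes of the address union are zeroed so nf_ct_tuple_equal() works.
 */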
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

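/* Link an already validated expectation into the per-helper list and the
 * global hash, and arm its timeout.  Caller holds nf_conntrack_expect_lock.
 */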
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references: one for the hash insert, one for the timer */
        refcount_add(2, &exp->use);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last)
                nf_ct_remove_expect(last);
}

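/* Validate a new expectation before insertion: replace an identical existing
 * one, reject clashing ones, and enforce the per-helper and global limits.
 * Called with nf_conntrack_expect_lock held.
 */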
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Remove the matching existing expectation; the
                         * caller then inserts the new one in its place. */
                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

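/* seq_file iterator helpers: walk the expectation hash bucket by bucket
 * under rcu_read_lock() for /proc/net/nf_conntrack_expect.
 */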
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_puts(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

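/* Create /proc/net/nf_conntrack_expect for this network namespace, readable
 * only by the namespace's root user and group when they can be mapped.
 */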
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
                           &exp_file_ops);
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        net->ct.expect_count = 0;
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

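/* Global init: size the expectation hash from nf_conntrack_htable_size (or
 * the expect_hashsize module parameter), limit the total number of
 * expectations to four times the bucket count, and set up the slab cache
 * and hash table.
 */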
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                sizeof(struct nf_conntrack_expect),
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}