/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit userland */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

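/*
 * Illustrative usage sketch, not part of this file: a hypothetical
 * extension module (all names below are made up) would register an
 * array of targets in one call and rely on the partial-failure
 * unwinding in xt_register_targets():
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{ .name = "FOO", .revision = 0, .family = NFPROTO_IPV4 },
 *		{ .name = "FOO", .revision = 0, .family = NFPROTO_IPV6 },
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg,
 *					   ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 */
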
int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

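/*
 * Illustrative usage sketch, not part of this file: callers such as the
 * ruleset translation code must treat the return value as an ERR_PTR()
 * and drop the module reference when done (variable names hypothetical):
 *
 *	match = xt_request_find_match(NFPROTO_IPV4, name, revision);
 *	if (IS_ERR(match))
 *		return PTR_ERR(match);
 *	...
 *	module_put(match->me);
 */
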
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

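/*
 * Worked example (illustrative): if revisions 0 and 2 of a match are
 * registered and userspace probes revision 1, match_revfn() leaves
 * have_rev == 0 but raises best to 2, so *err becomes -EPROTONOSUPPORT
 * and the function returns 1 (extension exists, revision does not).
 * Probing a completely unknown name leaves best == -1, so *err is set
 * to -ENOENT and 0 is returned, telling the caller to try loading the
 * module.
 */
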
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

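/*
 * Worked example (illustrative): for an inet-family table, a hook mask
 * of 0x09 has bits 0 (PREROUTING) and 3 (OUTPUT) set, so the loop above
 * writes "PREROUTING/OUTPUT" into buf. This helper only feeds the
 * diagnostics printed by xt_check_match()/xt_check_target() below.
 */
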
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

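/*
 * Worked example (illustrative): xt_compat_add_offset() stores each
 * delta as a running total, so compat_tab stays sorted by offset and a
 * binary search suffices here. After adding a delta of 4 at offset 100
 * and a delta of 8 at offset 200, compat_tab holds {100, 4} and
 * {200, 12}; xt_compat_calc_jump() then resolves a jump target of 150
 * to the cumulative delta 4, i.e. everything recorded below offset 150.
 */
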
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	free_percpu(info->stackptr);

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref. Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

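/*
 * Note on ordering (editor's sketch of the existing scheme): readers in
 * the packet path never take xt[af].mutex; they dereference
 * table->private under local_bh_disable() bracketed by the per-cpu
 * xt_recseq sequence counter defined above. The smp_wmb() in
 * xt_replace_table() pairs with that read side, so a CPU observing the
 * new private pointer also observes fully initialised newinfo contents.
 */
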
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

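/*
 * Illustrative usage sketch, not part of this file (names hypothetical):
 * a table implementation typically links its hooks once at init time,
 *
 *	ops = xt_hook_link(&packet_filter, table_hook_fn);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 * and undoes this with xt_hook_unlink(&packet_filter, ops) on removal.
 */
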
/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	the table whose valid_hooks mask was passed to xt_hook_link
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);