/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct ipt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, *back;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. the tcp syn flag or ports.  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		const struct ipt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e)
			if (do_match(ematch, skb, &mtpar) != 0)
				goto no_match;

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ipt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
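
/* Undo a successful check_match(): run the match's destructor, if any,
 * and drop the module reference taken when the match was looked up. */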
static void cleanup_match(struct ipt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
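
/* Basic sanity checks on one rule: the ipt_ip part must contain no
 * unknown flag bits and the target must fit inside the entry. */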
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
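
/* Run the extension's checkentry hook (via xt_check_match) against the
 * per-match payload of an already resolved match. */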
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	return 0;
}
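
/* Look up a match extension by name and revision, loading the ipt_<name>
 * module on demand, then validate it with check_match(). */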
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
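
/* Validate the entry's target through xt_check_target(), passing the
 * hook mask collected in e->comefrom by mark_source_chains(). */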
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
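
/* Resolve and validate everything a single rule refers to: each match
 * extension in turn, then the target; on failure, unwind the matches
 * that were already set up. */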
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
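
/* An underflow entry (built-in chain policy) must be unconditional and
 * use the standard target with an absolute ACCEPT or DROP verdict. */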
static bool check_underflow(const struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
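
/* Check alignment and bounds of one entry, record hook entry points and
 * underflows that land on it, and clear its counters and comefrom. */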
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
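
/* Release everything one rule holds: destroy all of its matches via
 * cleanup_match(), then the target, dropping the module references. */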
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net,
		const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
		      entry0 + size, hook_entries, underflows, valid_hooks);
		if (ret != 0)
			return ret;
		++i;
	}

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, name, size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i; /* macro does multi eval of i */
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
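
/* Allocate a counter array sized for the whole table and fill it with
 * a snapshot taken by get_counters(). */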
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
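
/* Copy the raw ruleset to userspace, then patch in the counter snapshot
 * and the user-visible match/target names. */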
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
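
/* Common tail of do_replace() and compat_do_replace(): swap in the new
 * table info, snapshot the old counters for userspace and free the old
 * ruleset. */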
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
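
/* IPT_SO_SET_REPLACE handler: copy the replacement header and blob from
 * userspace, translate it, then install it via __do_replace(). */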
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
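
/* IPT_SO_SET_ADD_COUNTERS handler: add a userspace-supplied counter
 * array to this CPU's copy of the table under the per-CPU write lock.
 * The header is read in native or compat layout depending on 'compat'. */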
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
		      &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
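
/* Translate a 32-bit (compat) ruleset into native layout: the first pass
 * checks each compat entry and computes the size delta, the second pass
 * expands the entries into a freshly allocated native table, which is
 * then marked and checked like a native one. */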
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
		      entry0, entry0 + total_size, hook_entries, underflows,
		      name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos,
		      &size, name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
		      &size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);