/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
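
/*
 * Editorial sketch (not part of the original file): the read side the
 * comment above describes is what ipt_do_table() does on the packet
 * path, roughly:
 *
 *	xt_info_rdlock_bh();
 *	private    = table->private;
 *	table_base = private->entries[smp_processor_id()];
 *	... traverse this CPU's copy of the ruleset ...
 *	xt_info_rdunlock_bh();
 *
 * while writers such as get_counters() visit every CPU's copy under
 * xt_info_wrlock(cpu), so the hot path never takes a shared write lock.
 */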
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
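
/*
 * Worked example (editorial comment): FWINV(x, IPT_INV_SRCIP) is plain x
 * when the rule's IPT_INV_SRCIP inversion flag is clear, and !x when it
 * is set; the '!!' collapses the flag test to 0/1 before the XOR. This is
 * how "! -s 10.0.0.0/8" style negations reuse the same comparison below.
 */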
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct ipt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
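
/*
 * Layout note (editorial comment): rules live in one flat blob. Each
 * struct ipt_entry is immediately followed by its matches and then its
 * target; e->target_offset and e->next_offset are byte offsets from the
 * start of the entry, so traversal is plain pointer arithmetic.
 */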

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct ipt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
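				/* Editorial note: userspace encodes absolute
				 * verdicts as -verdict - 1, so e.g. NF_ACCEPT
				 * (1) is stored as -2; the "(unsigned)(-v) - 1"
				 * below undoes that mapping. IPT_RETURN pops
				 * the jump stack instead.
				 */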
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_info_rdunlock_bh();
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ipt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static void cleanup_match(struct ipt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		/* 'par' is not in scope here; report the table name instead */
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static bool check_underflow(const struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
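
/*
 * Editorial example: the standard target stores ACCEPT as -NF_ACCEPT - 1
 * (i.e. -2) and DROP as -NF_DROP - 1 (i.e. -1); the "-verdict - 1" above
 * recovers 1 and 0 respectively, which is why only NF_ACCEPT and NF_DROP
 * are accepted as base-chain policies here.
 */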

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu = get_cpu();

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}
	local_bh_enable();
	/* Processing counters from other cpus, we can let bottom half enabled,
	 * (preemption is disabled)
	 */

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		local_bh_disable();
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i; /* macro does multi eval of i */
		}
		xt_info_wrunlock(cpu);
		local_bh_enable();
	}
	put_cpu();
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
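
/*
 * Editorial note: a native struct ipt_entry is larger than its 32-bit
 * compat counterpart, so absolute jump offsets differ between the two
 * layouts. xt_compat_calc_jump() looks up the cumulative size delta
 * recorded earlier via xt_compat_add_offset() and shifts positive
 * verdicts (jump targets) accordingly; negative verdicts are absolute
 * and pass through unchanged.
 */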

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user,
                    const int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
do_add_counters(struct net *net, const void __user *user,
                unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
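
/*
 * Usage sketch (editorial, modelled on iptable_filter.c; the names below
 * are illustrative, not part of this file): a table module registers
 * itself with something like
 *
 *	static const struct xt_table packet_filter = {
 *		.name		= "filter",
 *		.valid_hooks	= FILTER_VALID_HOOKS,
 *		.me		= THIS_MODULE,
 *		.af		= NFPROTO_IPV4,
 *	};
 *	...
 *	table = ipt_register_table(net, &packet_filter, repl);
 *
 * and tears down with ipt_unregister_table(net, table) on exit.
 */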

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
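
/*
 * Worked example (editorial): an echo request (type 8, code 0) against a
 * rule with test_type 8, min_code 0, max_code 0 yields true; test_type
 * 0xFF acts as a wildcard matching any ICMP type, and 'invert' flips the
 * final result for "! --icmp-type" style rules.
 */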

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = IPT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = IPT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = IPT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);