netfilter: xtables: compat out of scope fix
net/ipv4/netfilter/ip_tables.c
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
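
/*
 * Illustrative sketch (added commentary, not in the original file): the
 * FWINV() macro above XORs a match condition with the rule's inversion
 * flag, so the same expression serves both "-s 10.0.0.0/8" and
 * "! -s 10.0.0.0/8".  Assuming a rule with IPT_INV_SRCIP set:
 */
#if 0
	/* cond is false when saddr&smsk equals src, i.e. the address matches */
	bool cond = (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr;
	/* with IPT_INV_SRCIP set, FWINV(cond, IPT_INV_SRCIP) == !cond,
	   so a matching source address now makes the rule miss */
	bool mismatch = cond ^ !!(ipinfo->invflags & IPT_INV_SRCIP);
#endif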
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
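
/*
 * Illustrative sketch (added commentary): a rule is "unconditional" when
 * every field of its ipt_ip is zero -- no addresses, masks, interface
 * names, protocol or flags.  memcmp() against a zeroed static instance
 * checks all of that in one pass, e.g.:
 */
#if 0
	struct ipt_ip any = {};			/* matches every packet */
	bool uncond = unconditional(&any);	/* true: compares equal to
						   the all-zero template */
#endif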
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. the tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		struct ipt_entry_target *t;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff) ||
		    IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
			e = ipt_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
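
/*
 * Illustrative sketch (added commentary): standard targets store netfilter
 * verdicts as negative numbers so that non-negative values remain usable
 * as jump offsets into the table.  The encoding is -verdict - 1, undone
 * above by "verdict = (unsigned)(-v) - 1":
 */
#if 0
	int v = -NF_ACCEPT - 1;			/* what an ACCEPT verdict stores */
	unsigned int verdict = (unsigned)(-v) - 1;	/* back to NF_ACCEPT */
#endif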
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}

static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}

static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static bool check_underflow(struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, valid_hooks, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if a new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}

static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need an atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because the current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
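
/*
 * Illustrative sketch (added commentary): a 64-bit ipt_entry is larger
 * than its 32-bit compat counterpart, so every entry shifts the offsets
 * of everything behind it.  xt_compat_calc_jump() looks up the size delta
 * accumulated via xt_compat_add_offset(), which is why a positive verdict
 * (a jump offset) is adjusted in opposite directions above:
 */
#if 0
	cv -= xt_compat_calc_jump(AF_INET, cv);	/* kernel -> 32-bit user */
	v  += xt_compat_calc_jump(AF_INET, v);	/* 32-bit user -> kernel */
#endif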
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif

static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resources */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because the current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
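
/*
 * Illustrative sketch (added commentary): test_type 0xFF acts as the
 * wildcard ("any ICMP"); otherwise the type must match exactly and the
 * code must fall within [min_code, max_code], with the final XOR applying
 * IPT_ICMP_INV.  For an echo-request rule and a type 8, code 0 packet:
 */
#if 0
	bool hit = icmp_type_code_match(8, 0, 0xFF,	/* rule: type 8, any code */
					8, 0,		/* packet: echo request */
					false);		/* not inverted -> true */
#endif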
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);