netfilter: xtables: consolidate code into xt_request_find_match
net/ipv4/netfilter/ip_tables.c
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
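
/* For illustration: FWINV(cond, flag) XORs a mismatch test with the rule's
 * invert flag, so the same expression serves both "-s 10.0.0.0/8" and
 * "! -s 10.0.0.0/8".  With IPT_INV_SRCIP set, a source address outside the
 * range yields true ^ true == false (the rule still matches), while an
 * address inside the range yields false ^ true == true and the rule is
 * skipped.
 */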
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct ipt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, *back;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		const struct ipt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e)
			if (do_match(ematch, skb, &mtpar) != 0)
				goto no_match;

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
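
/* For illustration: standard verdicts arrive from userspace encoded as
 * -verdict - 1 and are decoded above with (unsigned)(-v) - 1.  A stored
 * v of -1 thus decodes to NF_DROP (0) and -2 to NF_ACCEPT (1), while a
 * non-negative v is not a verdict at all but a byte offset within the
 * table to jump to.
 */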
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ipt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
 next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
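
/* For illustration: mark_source_chains() is an iterative depth-first walk
 * over the rule graph.  While walking, it borrows e->counters.pcnt as the
 * back-pointer stack (zeroed again on the way out) and uses bit
 * NF_INET_NUMHOOKS of e->comefrom as the "on the current path" mark;
 * meeting that mark again means a cycle, and the caller turns the 0
 * result into -ELOOP.
 */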
static void cleanup_match(struct ipt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
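
/* Note (a reading of the commit title, not text from the original file):
 * this is the consolidation the patch refers to.  xt_request_find_match()
 * folds the old "xt_find_match(), request_module(), retry" sequence into
 * one x_tables helper and returns either a module-pinned xt_match or an
 * ERR_PTR() value, which is why the caller above tests IS_ERR() rather
 * than NULL.
 */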
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static bool check_underflow(const struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
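
/* Worked example: iptables encodes a base-chain policy of ACCEPT as the
 * standard verdict -NF_ACCEPT - 1 = -2.  check_underflow() reverses that
 * with -(-2) - 1 = 1 == NF_ACCEPT, so only unconditional ACCEPT or DROP
 * rules qualify as underflow (policy) entries.
 */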
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
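
/* For illustration: a replacement blob therefore passes four gates before
 * going live: per-entry size and offset checks, verification that every
 * valid hook has both an entry point and an underflow, loop detection in
 * mark_source_chains(), and the per-entry match/target checkentry calls.
 * Only then is the verified copy duplicated for every possible CPU.
 */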
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i; /* macro does multi eval of i */
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
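
/* For illustration: since every CPU owns a private copy of the table, a
 * counter snapshot is the sum over all copies.  The local CPU is read
 * with bottom halves off, and each remote CPU under its xt_info_wrlock(),
 * so a concurrent ipt_do_table() on that CPU cannot bump counters while
 * they are being read.
 */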
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
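
/* For illustration: compat (32-bit) entries are smaller than native ones,
 * so after translation every jump lands at a different byte offset.  A
 * positive standard verdict is such an offset, and xt_compat_calc_jump()
 * yields the accumulated size difference of the entries before it; adding
 * the delta on the way in and subtracting it on the way out keeps a jump
 * aimed at the same rule in both layouts.
 */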
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
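
/* Worked example: "--icmp-type echo-request" stores test_type 8 with code
 * range [0, 255], so an echo request (type 8) matches and an echo reply
 * (type 0) does not; test_type 0xFF acts as an "any type" wildcard, and
 * invert flips the final result.
 */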
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
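
/* Usage sketch (assumes the usual uapi headers and CAP_NET_ADMIN; not part
 * of the original file): these sockopts are the interface the iptables
 * binary drives.  A minimal query of the "filter" table could look like:
 *
 *	struct ipt_getinfo info = {};
 *	socklen_t len = sizeof(info);
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	strcpy(info.name, "filter");
 *	getsockopt(s, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *	// info.num_entries and info.size now describe the table
 */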
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);