/* [nao-ulib.git] kernel/2.6.29.6-aldebaran-rt/net/ipv4/netfilter/ip_tables.c */

/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
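
/*
 * Note (added commentary, not in the original source): in this version
 * the scheme above means the packet path in ipt_do_table() only ever
 * takes table->lock for reading while it bumps its CPU-local copy of
 * the counters, whereas table replacement and counter snapshots take
 * the same lock for writing and thereby quiesce all packet processing.
 */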

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
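
/*
 * Note on FWINV() above (added commentary, not in the original source):
 * the macro XORs a mismatch test with the corresponding inversion flag.
 * For example, with IPT_INV_PROTO set in ipinfo->invflags,
 * FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO) evaluates true
 * exactly when the protocol DOES equal the configured one, so the rule
 * rejects it - implementing iptables' "!" syntax without extra branches.
 */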

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
#undef FWINV
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		   && strcmp(t->target.u.kernel.target->name,
			     IPT_STANDARD_TARGET) == 0
		   && t->verdict < 0
		   && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
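
/*
 * Note on verdict encoding (added commentary, not in the original
 * source): standard targets store built-in verdicts as negative
 * numbers mapped by -NF_VERDICT - 1, e.g. NF_ACCEPT (1) becomes -2 and
 * NF_DROP (0) becomes -1; ipt_do_table() below recovers the netfilter
 * verdict with (unsigned)(-v) - 1. Non-negative values are byte
 * offsets into the table blob, i.e. jumps to another rule.
 */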

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	tgpar.hooknum = hook;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[raw_smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
				tgpar.target   = t->u.kernel.target;
				tgpar.targinfo = t->data;
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
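
/*
 * Note (added commentary, not in the original source):
 * mark_source_chains() below walks every path reachable from each hook
 * entry point without recursion. It borrows e->counters.pcnt as a
 * temporary back-pointer slot while descending (resetting it to 0 on
 * the way back up) and sets bit NF_INET_NUMHOOKS in e->comefrom to mean
 * "currently on the walk stack"; revisiting an entry with that bit
 * still set is how a ruleset loop is detected.
 */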

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
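
/*
 * Note on the iterator convention (added commentary, not in the
 * original source): IPT_MATCH_ITERATE()/IPT_ENTRY_ITERATE() stop as
 * soon as the callback returns nonzero. cleanup_match() below exploits
 * this: when handed a countdown index it returns 1 once *i reaches
 * zero, so on a partially-checked entry only the matches that were
 * successfully set up get their destructors run and module references
 * dropped.
 */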

static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}

static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}

static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
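
/*
 * Note (added commentary, not in the original source): translation of a
 * user-supplied blob proceeds in stages in translate_table() below:
 * (1) walk all entries checking sizes/offsets and recording hook entry
 * points, (2) verify every valid hook got an entry and an underflow,
 * (3) reject rulesets containing loops, (4) run per-entry match/target
 * sanity checks, and only then replicate the verified blob into every
 * other CPU's copy.
 */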

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by the 'current' CPU.
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}

static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
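
/*
 * Note (added commentary, not in the original source): taking the table
 * lock for writing in alloc_counters() above excludes the packet path's
 * read_lock_bh(), so summing the per-CPU copies yields a consistent
 * snapshot; only counter updates are held off while get_counters()
 * runs.
 */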

static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
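
/*
 * Note (added commentary, not in the original source): the compat code
 * below converts between the 32-bit userland layout (struct
 * compat_ipt_entry et al.) and the kernel's native layout. Entries can
 * change size during conversion, so each entry's size delta is
 * registered with xt_compat_add_offset() and later applied to jump
 * targets via xt_compat_calc_jump().
 */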

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif

static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
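
/*
 * Note on the module_put() pair above (added commentary, not in the
 * original source): roughly, a table holds an extra reference on its
 * owning module while it contains more than its built-in policy
 * entries. The two conditional puts drop the reference taken by
 * xt_find_table_lock() and, when the new ruleset shrinks back to the
 * initial entries, the extra "has user rules" reference as well.
 */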

static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
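
/*
 * Note (added commentary, not in the original source):
 * translate_compat_table() above works in two passes: pass one walks
 * the 32-bit blob under xt_compat_lock(), validating sizes and
 * recording per-entry size deltas; pass two copies everything into a
 * freshly allocated native-layout table, after which the usual loop
 * detection and per-entry sanity checks run on the translated copy.
 */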

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
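
/*
 * Worked example (added commentary, not in the original source): for a
 * rule matching ICMP destination-unreachable codes 0-3, test_type is 3
 * and [min_code, max_code] is [0, 3], so only type-3 packets with those
 * codes match. test_type 0xFF acts as a wildcard for any ICMP type, and
 * 'invert' XORs the final result to implement iptables' "!" syntax.
 */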

static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);