[NETFILTER]: ip_tables: fix module refcount leaks in compat error paths
[linux-2.6.22.y-op.git] / net / ipv4 / netfilter / ip_tables.c
blob3d5d4a4640c32d93e320901e84ddffaa1e13d596
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
13 * a table
14 * 08 Oct 2005 Harald Welte <lafore@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
25 #include <net/ip.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
46 #else
47 #define dprintf(format, args...)
48 #endif
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
52 #else
53 #define duprintf(format, args...)
54 #endif
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
58 do { \
59 if (!(x)) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
62 } while(0)
63 #else
64 #define IP_NF_ASSERT(x)
65 #endif
67 #if 0
68 /* All the better to debug you with... */
69 #define static
70 #define inline
71 #endif
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 static inline int
84 ip_packet_match(const struct iphdr *ip,
85 const char *indev,
86 const char *outdev,
87 const struct ipt_ip *ipinfo,
88 int isfrag)
90 size_t i;
91 unsigned long ret;
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
95 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96 IPT_INV_SRCIP)
97 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98 IPT_INV_DSTIP)) {
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102 NIPQUAD(ip->saddr),
103 NIPQUAD(ipinfo->smsk.s_addr),
104 NIPQUAD(ipinfo->src.s_addr),
105 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107 NIPQUAD(ip->daddr),
108 NIPQUAD(ipinfo->dmsk.s_addr),
109 NIPQUAD(ipinfo->dst.s_addr),
110 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
111 return 0;
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116 ret |= (((const unsigned long *)indev)[i]
117 ^ ((const unsigned long *)ipinfo->iniface)[i])
118 & ((const unsigned long *)ipinfo->iniface_mask)[i];
121 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ipinfo->iniface,
124 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
125 return 0;
128 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129 ret |= (((const unsigned long *)outdev)[i]
130 ^ ((const unsigned long *)ipinfo->outiface)[i])
131 & ((const unsigned long *)ipinfo->outiface_mask)[i];
134 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev, ipinfo->outiface,
137 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
138 return 0;
141 /* Check specific protocol */
142 if (ipinfo->proto
143 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip->protocol, ipinfo->proto,
146 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
147 return 0;
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
155 return 0;
158 return 1;
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
167 return 0;
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
172 return 0;
174 return 1;
177 static unsigned int
178 ipt_error(struct sk_buff **pskb,
179 const struct net_device *in,
180 const struct net_device *out,
181 unsigned int hooknum,
182 const struct xt_target *target,
183 const void *targinfo)
185 if (net_ratelimit())
186 printk("ip_tables: error: `%s'\n", (char *)targinfo);
188 return NF_DROP;
191 static inline
192 int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
196 int offset,
197 int *hotdrop)
199 /* Stop iteration if it doesn't match */
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
202 return 1;
203 else
204 return 0;
/* Translate a byte offset within the table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
214 unsigned int
215 ipt_do_table(struct sk_buff **pskb,
216 unsigned int hook,
217 const struct net_device *in,
218 const struct net_device *out,
219 struct ipt_table *table)
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222 u_int16_t offset;
223 struct iphdr *ip;
224 u_int16_t datalen;
225 int hotdrop = 0;
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict = NF_DROP;
228 const char *indev, *outdev;
229 void *table_base;
230 struct ipt_entry *e, *back;
231 struct xt_table_info *private;
233 /* Initialization */
234 ip = (*pskb)->nh.iph;
235 datalen = (*pskb)->len - ip->ihl * 4;
236 indev = in ? in->name : nulldevname;
237 outdev = out ? out->name : nulldevname;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know, ie. tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
243 * match it. */
244 offset = ntohs(ip->frag_off) & IP_OFFSET;
246 read_lock_bh(&table->lock);
247 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248 private = table->private;
249 table_base = (void *)private->entries[smp_processor_id()];
250 e = get_entry(table_base, private->hook_entry[hook]);
252 /* For return from builtin chain */
253 back = get_entry(table_base, private->underflow[hook]);
255 do {
256 IP_NF_ASSERT(e);
257 IP_NF_ASSERT(back);
258 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259 struct ipt_entry_target *t;
261 if (IPT_MATCH_ITERATE(e, do_match,
262 *pskb, in, out,
263 offset, &hotdrop) != 0)
264 goto no_match;
266 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
268 t = ipt_get_target(e);
269 IP_NF_ASSERT(t->u.kernel.target);
270 /* Standard target? */
271 if (!t->u.kernel.target->target) {
272 int v;
274 v = ((struct ipt_standard_target *)t)->verdict;
275 if (v < 0) {
276 /* Pop from stack? */
277 if (v != IPT_RETURN) {
278 verdict = (unsigned)(-v) - 1;
279 break;
281 e = back;
282 back = get_entry(table_base,
283 back->comefrom);
284 continue;
286 if (table_base + v != (void *)e + e->next_offset
287 && !(e->ip.flags & IPT_F_GOTO)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry *next
290 = (void *)e + e->next_offset;
291 next->comefrom
292 = (void *)back - table_base;
293 /* set back pointer to next entry */
294 back = next;
297 e = get_entry(table_base, v);
298 } else {
299 /* Targets which reenter must return
300 abs. verdicts */
301 #ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
303 = 0xeeeeeeec;
304 #endif
305 verdict = t->u.kernel.target->target(pskb,
306 in, out,
307 hook,
308 t->u.kernel.target,
309 t->data);
311 #ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry *)table_base)->comefrom
313 != 0xeeeeeeec
314 && verdict == IPT_CONTINUE) {
315 printk("Target %s reentered!\n",
316 t->u.kernel.target->name);
317 verdict = NF_DROP;
319 ((struct ipt_entry *)table_base)->comefrom
320 = 0x57acc001;
321 #endif
322 /* Target might have changed stuff. */
323 ip = (*pskb)->nh.iph;
324 datalen = (*pskb)->len - ip->ihl * 4;
326 if (verdict == IPT_CONTINUE)
327 e = (void *)e + e->next_offset;
328 else
329 /* Verdict */
330 break;
332 } else {
334 no_match:
335 e = (void *)e + e->next_offset;
337 } while (!hotdrop);
339 read_unlock_bh(&table->lock);
341 #ifdef DEBUG_ALLOW_ALL
342 return NF_ACCEPT;
343 #else
344 if (hotdrop)
345 return NF_DROP;
346 else return verdict;
347 #endif
350 /* All zeroes == unconditional rule. */
351 static inline int
352 unconditional(const struct ipt_ip *ip)
354 unsigned int i;
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
358 return 0;
360 return 1;
363 /* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
365 static int
366 mark_source_chains(struct xt_table_info *newinfo,
367 unsigned int valid_hooks, void *entry0)
369 unsigned int hook;
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
373 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374 unsigned int pos = newinfo->hook_entry[hook];
375 struct ipt_entry *e
376 = (struct ipt_entry *)(entry0 + pos);
378 if (!(valid_hooks & (1 << hook)))
379 continue;
381 /* Set initial back pointer. */
382 e->counters.pcnt = pos;
384 for (;;) {
385 struct ipt_standard_target *t
386 = (void *)ipt_get_target(e);
388 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook, pos, e->comefrom);
391 return 0;
393 e->comefrom
394 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
396 /* Unconditional return/END. */
397 if (e->target_offset == sizeof(struct ipt_entry)
398 && (strcmp(t->target.u.user.name,
399 IPT_STANDARD_TARGET) == 0)
400 && t->verdict < 0
401 && unconditional(&e->ip)) {
402 unsigned int oldpos, size;
404 /* Return: backtrack through the last
405 big jump. */
406 do {
407 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408 #ifdef DEBUG_IP_FIREWALL_USER
409 if (e->comefrom
410 & (1 << NF_IP_NUMHOOKS)) {
411 duprintf("Back unset "
412 "on hook %u "
413 "rule %u\n",
414 hook, pos);
416 #endif
417 oldpos = pos;
418 pos = e->counters.pcnt;
419 e->counters.pcnt = 0;
421 /* We're at the start. */
422 if (pos == oldpos)
423 goto next;
425 e = (struct ipt_entry *)
426 (entry0 + pos);
427 } while (oldpos == pos + e->next_offset);
429 /* Move along one */
430 size = e->next_offset;
431 e = (struct ipt_entry *)
432 (entry0 + pos + size);
433 e->counters.pcnt = pos;
434 pos += size;
435 } else {
436 int newpos = t->verdict;
438 if (strcmp(t->target.u.user.name,
439 IPT_STANDARD_TARGET) == 0
440 && newpos >= 0) {
441 /* This a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
443 pos, newpos);
444 } else {
445 /* ... this is a fallthru */
446 newpos = pos + e->next_offset;
448 e = (struct ipt_entry *)
449 (entry0 + newpos);
450 e->counters.pcnt = pos;
451 pos = newpos;
454 next:
455 duprintf("Finished chain %u\n", hook);
457 return 1;
460 static inline int
461 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
463 if (i && (*i)-- == 0)
464 return 1;
466 if (m->u.kernel.match->destroy)
467 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468 module_put(m->u.kernel.match->me);
469 return 0;
472 static inline int
473 standard_check(const struct ipt_entry_target *t,
474 unsigned int max_offset)
476 struct ipt_standard_target *targ = (void *)t;
478 /* Check standard info. */
479 if (targ->verdict >= 0
480 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481 duprintf("ipt_standard_check: bad verdict (%i)\n",
482 targ->verdict);
483 return 0;
485 if (targ->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
487 targ->verdict);
488 return 0;
490 return 1;
493 static inline int
494 check_match(struct ipt_entry_match *m,
495 const char *name,
496 const struct ipt_ip *ip,
497 unsigned int hookmask,
498 unsigned int *i)
500 struct ipt_match *match;
501 int ret;
503 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
504 m->u.user.revision),
505 "ipt_%s", m->u.user.name);
506 if (IS_ERR(match) || !match) {
507 duprintf("check_match: `%s' not found\n", m->u.user.name);
508 return match ? PTR_ERR(match) : -ENOENT;
510 m->u.kernel.match = match;
512 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513 name, hookmask, ip->proto,
514 ip->invflags & IPT_INV_PROTO);
515 if (ret)
516 goto err;
518 if (m->u.kernel.match->checkentry
519 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
520 hookmask)) {
521 duprintf("ip_tables: check failed for `%s'.\n",
522 m->u.kernel.match->name);
523 ret = -EINVAL;
524 goto err;
527 (*i)++;
528 return 0;
529 err:
530 module_put(m->u.kernel.match->me);
531 return ret;
534 static struct ipt_target ipt_standard_target;
536 static inline int
537 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
538 unsigned int *i)
540 struct ipt_entry_target *t;
541 struct ipt_target *target;
542 int ret;
543 unsigned int j;
545 if (!ip_checkentry(&e->ip)) {
546 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
547 return -EINVAL;
550 j = 0;
551 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
552 if (ret != 0)
553 goto cleanup_matches;
555 t = ipt_get_target(e);
556 target = try_then_request_module(xt_find_target(AF_INET,
557 t->u.user.name,
558 t->u.user.revision),
559 "ipt_%s", t->u.user.name);
560 if (IS_ERR(target) || !target) {
561 duprintf("check_entry: `%s' not found\n", t->u.user.name);
562 ret = target ? PTR_ERR(target) : -ENOENT;
563 goto cleanup_matches;
565 t->u.kernel.target = target;
567 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
568 name, e->comefrom, e->ip.proto,
569 e->ip.invflags & IPT_INV_PROTO);
570 if (ret)
571 goto err;
573 if (t->u.kernel.target == &ipt_standard_target) {
574 if (!standard_check(t, size)) {
575 ret = -EINVAL;
576 goto err;
578 } else if (t->u.kernel.target->checkentry
579 && !t->u.kernel.target->checkentry(name, e, target, t->data,
580 e->comefrom)) {
581 duprintf("ip_tables: check failed for `%s'.\n",
582 t->u.kernel.target->name);
583 ret = -EINVAL;
584 goto err;
587 (*i)++;
588 return 0;
589 err:
590 module_put(t->u.kernel.target->me);
591 cleanup_matches:
592 IPT_MATCH_ITERATE(e, cleanup_match, &j);
593 return ret;
596 static inline int
597 check_entry_size_and_hooks(struct ipt_entry *e,
598 struct xt_table_info *newinfo,
599 unsigned char *base,
600 unsigned char *limit,
601 const unsigned int *hook_entries,
602 const unsigned int *underflows,
603 unsigned int *i)
605 unsigned int h;
607 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
608 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
609 duprintf("Bad offset %p\n", e);
610 return -EINVAL;
613 if (e->next_offset
614 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
615 duprintf("checking: element %p size %u\n",
616 e, e->next_offset);
617 return -EINVAL;
620 /* Check hooks & underflows */
621 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
622 if ((unsigned char *)e - base == hook_entries[h])
623 newinfo->hook_entry[h] = hook_entries[h];
624 if ((unsigned char *)e - base == underflows[h])
625 newinfo->underflow[h] = underflows[h];
628 /* FIXME: underflows must be unconditional, standard verdicts
629 < 0 (not IPT_RETURN). --RR */
631 /* Clear counters and comefrom */
632 e->counters = ((struct xt_counters) { 0, 0 });
633 e->comefrom = 0;
635 (*i)++;
636 return 0;
639 static inline int
640 cleanup_entry(struct ipt_entry *e, unsigned int *i)
642 struct ipt_entry_target *t;
644 if (i && (*i)-- == 0)
645 return 1;
647 /* Cleanup all matches */
648 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
649 t = ipt_get_target(e);
650 if (t->u.kernel.target->destroy)
651 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
652 module_put(t->u.kernel.target->me);
653 return 0;
656 /* Checks and translates the user-supplied table segment (held in
657 newinfo) */
658 static int
659 translate_table(const char *name,
660 unsigned int valid_hooks,
661 struct xt_table_info *newinfo,
662 void *entry0,
663 unsigned int size,
664 unsigned int number,
665 const unsigned int *hook_entries,
666 const unsigned int *underflows)
668 unsigned int i;
669 int ret;
671 newinfo->size = size;
672 newinfo->number = number;
674 /* Init all hooks to impossible value. */
675 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
676 newinfo->hook_entry[i] = 0xFFFFFFFF;
677 newinfo->underflow[i] = 0xFFFFFFFF;
680 duprintf("translate_table: size %u\n", newinfo->size);
681 i = 0;
682 /* Walk through entries, checking offsets. */
683 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
684 check_entry_size_and_hooks,
685 newinfo,
686 entry0,
687 entry0 + size,
688 hook_entries, underflows, &i);
689 if (ret != 0)
690 return ret;
692 if (i != number) {
693 duprintf("translate_table: %u not %u entries\n",
694 i, number);
695 return -EINVAL;
698 /* Check hooks all assigned */
699 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
700 /* Only hooks which are valid */
701 if (!(valid_hooks & (1 << i)))
702 continue;
703 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
704 duprintf("Invalid hook entry %u %u\n",
705 i, hook_entries[i]);
706 return -EINVAL;
708 if (newinfo->underflow[i] == 0xFFFFFFFF) {
709 duprintf("Invalid underflow %u %u\n",
710 i, underflows[i]);
711 return -EINVAL;
715 if (!mark_source_chains(newinfo, valid_hooks, entry0))
716 return -ELOOP;
718 /* Finally, each sanity check must pass */
719 i = 0;
720 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
721 check_entry, name, size, &i);
723 if (ret != 0) {
724 IPT_ENTRY_ITERATE(entry0, newinfo->size,
725 cleanup_entry, &i);
726 return ret;
729 /* And one copy for every other CPU */
730 for_each_possible_cpu(i) {
731 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
732 memcpy(newinfo->entries[i], entry0, newinfo->size);
735 return ret;
738 /* Gets counters. */
739 static inline int
740 add_entry_to_counter(const struct ipt_entry *e,
741 struct xt_counters total[],
742 unsigned int *i)
744 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
746 (*i)++;
747 return 0;
750 static inline int
751 set_entry_to_counter(const struct ipt_entry *e,
752 struct ipt_counters total[],
753 unsigned int *i)
755 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
757 (*i)++;
758 return 0;
761 static void
762 get_counters(const struct xt_table_info *t,
763 struct xt_counters counters[])
765 unsigned int cpu;
766 unsigned int i;
767 unsigned int curcpu;
769 /* Instead of clearing (by a previous call to memset())
770 * the counters and using adds, we set the counters
771 * with data used by 'current' CPU
772 * We dont care about preemption here.
774 curcpu = raw_smp_processor_id();
776 i = 0;
777 IPT_ENTRY_ITERATE(t->entries[curcpu],
778 t->size,
779 set_entry_to_counter,
780 counters,
781 &i);
783 for_each_possible_cpu(cpu) {
784 if (cpu == curcpu)
785 continue;
786 i = 0;
787 IPT_ENTRY_ITERATE(t->entries[cpu],
788 t->size,
789 add_entry_to_counter,
790 counters,
791 &i);
795 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
797 unsigned int countersize;
798 struct xt_counters *counters;
799 struct xt_table_info *private = table->private;
801 /* We need atomic snapshot of counters: rest doesn't change
802 (other than comefrom, which userspace doesn't care
803 about). */
804 countersize = sizeof(struct xt_counters) * private->number;
805 counters = vmalloc_node(countersize, numa_node_id());
807 if (counters == NULL)
808 return ERR_PTR(-ENOMEM);
810 /* First, sum counters... */
811 write_lock_bh(&table->lock);
812 get_counters(private, counters);
813 write_unlock_bh(&table->lock);
815 return counters;
818 static int
819 copy_entries_to_user(unsigned int total_size,
820 struct ipt_table *table,
821 void __user *userptr)
823 unsigned int off, num;
824 struct ipt_entry *e;
825 struct xt_counters *counters;
826 struct xt_table_info *private = table->private;
827 int ret = 0;
828 void *loc_cpu_entry;
830 counters = alloc_counters(table);
831 if (IS_ERR(counters))
832 return PTR_ERR(counters);
834 /* choose the copy that is on our node/cpu, ...
835 * This choice is lazy (because current thread is
836 * allowed to migrate to another cpu)
838 loc_cpu_entry = private->entries[raw_smp_processor_id()];
839 /* ... then copy entire thing ... */
840 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
841 ret = -EFAULT;
842 goto free_counters;
845 /* FIXME: use iterator macros --RR */
846 /* ... then go back and fix counters and names */
847 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
848 unsigned int i;
849 struct ipt_entry_match *m;
850 struct ipt_entry_target *t;
852 e = (struct ipt_entry *)(loc_cpu_entry + off);
853 if (copy_to_user(userptr + off
854 + offsetof(struct ipt_entry, counters),
855 &counters[num],
856 sizeof(counters[num])) != 0) {
857 ret = -EFAULT;
858 goto free_counters;
861 for (i = sizeof(struct ipt_entry);
862 i < e->target_offset;
863 i += m->u.match_size) {
864 m = (void *)e + i;
866 if (copy_to_user(userptr + off + i
867 + offsetof(struct ipt_entry_match,
868 u.user.name),
869 m->u.kernel.match->name,
870 strlen(m->u.kernel.match->name)+1)
871 != 0) {
872 ret = -EFAULT;
873 goto free_counters;
877 t = ipt_get_target(e);
878 if (copy_to_user(userptr + off + e->target_offset
879 + offsetof(struct ipt_entry_target,
880 u.user.name),
881 t->u.kernel.target->name,
882 strlen(t->u.kernel.target->name)+1) != 0) {
883 ret = -EFAULT;
884 goto free_counters;
888 free_counters:
889 vfree(counters);
890 return ret;
893 #ifdef CONFIG_COMPAT
894 struct compat_delta {
895 struct compat_delta *next;
896 u_int16_t offset;
897 short delta;
900 static struct compat_delta *compat_offsets = NULL;
902 static int compat_add_offset(u_int16_t offset, short delta)
904 struct compat_delta *tmp;
906 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
907 if (!tmp)
908 return -ENOMEM;
909 tmp->offset = offset;
910 tmp->delta = delta;
911 if (compat_offsets) {
912 tmp->next = compat_offsets->next;
913 compat_offsets->next = tmp;
914 } else {
915 compat_offsets = tmp;
916 tmp->next = NULL;
918 return 0;
921 static void compat_flush_offsets(void)
923 struct compat_delta *tmp, *next;
925 if (compat_offsets) {
926 for(tmp = compat_offsets; tmp; tmp = next) {
927 next = tmp->next;
928 kfree(tmp);
930 compat_offsets = NULL;
934 static short compat_calc_jump(u_int16_t offset)
936 struct compat_delta *tmp;
937 short delta;
939 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
940 if (tmp->offset < offset)
941 delta += tmp->delta;
942 return delta;
945 struct compat_ipt_standard_target
947 struct compat_xt_entry_target target;
948 compat_int_t verdict;
951 struct compat_ipt_standard
953 struct compat_ipt_entry entry;
954 struct compat_ipt_standard_target target;
957 #define IPT_ST_LEN XT_ALIGN(sizeof(struct ipt_standard_target))
958 #define IPT_ST_COMPAT_LEN COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
959 #define IPT_ST_OFFSET (IPT_ST_LEN - IPT_ST_COMPAT_LEN)
961 static int compat_ipt_standard_fn(void *target,
962 void **dstptr, int *size, int convert)
964 struct compat_ipt_standard_target compat_st, *pcompat_st;
965 struct ipt_standard_target st, *pst;
966 int ret;
968 ret = 0;
969 switch (convert) {
970 case COMPAT_TO_USER:
971 pst = target;
972 memcpy(&compat_st.target, &pst->target,
973 sizeof(compat_st.target));
974 compat_st.verdict = pst->verdict;
975 if (compat_st.verdict > 0)
976 compat_st.verdict -=
977 compat_calc_jump(compat_st.verdict);
978 compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
979 if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
980 ret = -EFAULT;
981 *size -= IPT_ST_OFFSET;
982 *dstptr += IPT_ST_COMPAT_LEN;
983 break;
984 case COMPAT_FROM_USER:
985 pcompat_st = target;
986 memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
987 st.verdict = pcompat_st->verdict;
988 if (st.verdict > 0)
989 st.verdict += compat_calc_jump(st.verdict);
990 st.target.u.user.target_size = IPT_ST_LEN;
991 memcpy(*dstptr, &st, IPT_ST_LEN);
992 *size += IPT_ST_OFFSET;
993 *dstptr += IPT_ST_LEN;
994 break;
995 case COMPAT_CALC_SIZE:
996 *size += IPT_ST_OFFSET;
997 break;
998 default:
999 ret = -ENOPROTOOPT;
1000 break;
1002 return ret;
1005 static inline int
1006 compat_calc_match(struct ipt_entry_match *m, int * size)
1008 if (m->u.kernel.match->compat)
1009 m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
1010 else
1011 xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
1012 return 0;
1015 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
1016 void *base, struct xt_table_info *newinfo)
1018 struct ipt_entry_target *t;
1019 u_int16_t entry_offset;
1020 int off, i, ret;
1022 off = 0;
1023 entry_offset = (void *)e - base;
1024 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1025 t = ipt_get_target(e);
1026 if (t->u.kernel.target->compat)
1027 t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
1028 else
1029 xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
1030 newinfo->size -= off;
1031 ret = compat_add_offset(entry_offset, off);
1032 if (ret)
1033 return ret;
1035 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
1036 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1037 (base + info->hook_entry[i])))
1038 newinfo->hook_entry[i] -= off;
1039 if (info->underflow[i] && (e < (struct ipt_entry *)
1040 (base + info->underflow[i])))
1041 newinfo->underflow[i] -= off;
1043 return 0;
1046 static int compat_table_info(struct xt_table_info *info,
1047 struct xt_table_info *newinfo)
1049 void *loc_cpu_entry;
1050 int i;
1052 if (!newinfo || !info)
1053 return -EINVAL;
1055 memset(newinfo, 0, sizeof(struct xt_table_info));
1056 newinfo->size = info->size;
1057 newinfo->number = info->number;
1058 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1059 newinfo->hook_entry[i] = info->hook_entry[i];
1060 newinfo->underflow[i] = info->underflow[i];
1062 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1063 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1064 compat_calc_entry, info, loc_cpu_entry, newinfo);
1066 #endif
1068 static int get_info(void __user *user, int *len, int compat)
1070 char name[IPT_TABLE_MAXNAMELEN];
1071 struct ipt_table *t;
1072 int ret;
1074 if (*len != sizeof(struct ipt_getinfo)) {
1075 duprintf("length %u != %u\n", *len,
1076 (unsigned int)sizeof(struct ipt_getinfo));
1077 return -EINVAL;
1080 if (copy_from_user(name, user, sizeof(name)) != 0)
1081 return -EFAULT;
1083 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1084 #ifdef CONFIG_COMPAT
1085 if (compat)
1086 xt_compat_lock(AF_INET);
1087 #endif
1088 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1089 "iptable_%s", name);
1090 if (t && !IS_ERR(t)) {
1091 struct ipt_getinfo info;
1092 struct xt_table_info *private = t->private;
1094 #ifdef CONFIG_COMPAT
1095 if (compat) {
1096 struct xt_table_info tmp;
1097 ret = compat_table_info(private, &tmp);
1098 compat_flush_offsets();
1099 private = &tmp;
1101 #endif
1102 info.valid_hooks = t->valid_hooks;
1103 memcpy(info.hook_entry, private->hook_entry,
1104 sizeof(info.hook_entry));
1105 memcpy(info.underflow, private->underflow,
1106 sizeof(info.underflow));
1107 info.num_entries = private->number;
1108 info.size = private->size;
1109 strcpy(info.name, name);
1111 if (copy_to_user(user, &info, *len) != 0)
1112 ret = -EFAULT;
1113 else
1114 ret = 0;
1116 xt_table_unlock(t);
1117 module_put(t->me);
1118 } else
1119 ret = t ? PTR_ERR(t) : -ENOENT;
1120 #ifdef CONFIG_COMPAT
1121 if (compat)
1122 xt_compat_unlock(AF_INET);
1123 #endif
1124 return ret;
1127 static int
1128 get_entries(struct ipt_get_entries __user *uptr, int *len)
1130 int ret;
1131 struct ipt_get_entries get;
1132 struct ipt_table *t;
1134 if (*len < sizeof(get)) {
1135 duprintf("get_entries: %u < %d\n", *len,
1136 (unsigned int)sizeof(get));
1137 return -EINVAL;
1139 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1140 return -EFAULT;
1141 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1142 duprintf("get_entries: %u != %u\n", *len,
1143 (unsigned int)(sizeof(struct ipt_get_entries) +
1144 get.size));
1145 return -EINVAL;
1148 t = xt_find_table_lock(AF_INET, get.name);
1149 if (t && !IS_ERR(t)) {
1150 struct xt_table_info *private = t->private;
1151 duprintf("t->private->number = %u\n",
1152 private->number);
1153 if (get.size == private->size)
1154 ret = copy_entries_to_user(private->size,
1155 t, uptr->entrytable);
1156 else {
1157 duprintf("get_entries: I've got %u not %u!\n",
1158 private->size,
1159 get.size);
1160 ret = -EINVAL;
1162 module_put(t->me);
1163 xt_table_unlock(t);
1164 } else
1165 ret = t ? PTR_ERR(t) : -ENOENT;
1167 return ret;
/*
 * Common core of do_replace() and compat_do_replace(): atomically swap
 * a freshly translated table @newinfo in under table @name, copy the old
 * table's counters out to @counters_ptr and free the old table.
 *
 * On success the old xt_table_info is freed here; on failure the caller
 * still owns @newinfo and must clean it up itself.
 */
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct ipt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Takes the table lock and a module reference on success. */
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop the reference taken when rules were first added beyond the
	 * initial (built-in) entries, and the find_table reference, as
	 * appropriate for the old/new rule counts. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
/*
 * IPT_SO_SET_REPLACE handler (native ABI): copy the replacement blob from
 * user space, translate/verify it, then swap it in via __do_replace().
 */
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* Translation succeeded, so drop the match/target refs it took. */
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK.
 *
 * IPT_ENTRY_ITERATE callback: add the *i'th user-supplied counter pair
 * to entry @e and advance the index.  Always returns 0 (keep iterating).
 */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
/*
 * IPT_SO_SET_ADD_COUNTERS handler, shared between the native and 32-bit
 * compat paths (@compat selects which header layout to read from @user).
 * Adds the user-supplied byte/packet counters to the live table under
 * the table's write lock.
 */
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	/* Takes the table lock and a module reference on success. */
	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1401 #ifdef CONFIG_COMPAT
/* 32-bit user-space layout of struct ipt_replace: same fields, but the
 * counters pointer is a compat_uptr_t and the trailing entries use the
 * compat entry layout. */
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
1414 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1415 void __user **dstptr, compat_uint_t *size)
1417 if (m->u.kernel.match->compat)
1418 return m->u.kernel.match->compat(m, dstptr, size,
1419 COMPAT_TO_USER);
1420 else
1421 return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
/*
 * Copy one kernel rule entry (plus its matches and target) to user space
 * in the compat layout, fixing up target_offset/next_offset to account
 * for the size difference between kernel and compat representations.
 * Advances *dstptr and decrements *size as it goes.
 */
static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target __user *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	/* origsize - *size is how much the entry shrank so far. */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		ret = t->u.kernel.target->compat(t, dstptr, size,
						 COMPAT_TO_USER);
	else
		ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
/*
 * IPT_MATCH_ITERATE callback for the compat size pass: look up (and take
 * a module reference on) the match, then add the kernel-vs-compat size
 * delta for this match to *size.  *i counts successfully processed
 * matches so the caller can unwind references on error.
 */
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    int *size, int *i)
{
	struct ipt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);

	(*i)++;
	return 0;
}
/*
 * First pass over a compat rule blob: validate alignment/bounds of one
 * entry, resolve its matches and target (taking module references),
 * accumulate the compat->kernel size delta into *size, record the
 * entry's compat offset, and note hook entry/underflow positions.
 *
 * On error all module references taken for this entry are dropped via
 * the cleanup_matches/out labels; *i matches successfully processed so
 * far are unwound by cleanup_match.
 */
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	u_int16_t entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	/* Target was resolved; drop its module reference. */
	module_put(t->u.kernel.target->me);
cleanup_matches:
	/* Drop references of the j matches resolved so far. */
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
/*
 * Second compat pass, per match: expand the match from compat to kernel
 * layout at *dstptr, then run xt_check_match() and the match's own
 * checkentry hook on the expanded copy.  On failure the match's module
 * reference is dropped here; *i counts matches that passed so the
 * caller can unwind the rest.
 */
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask, int *i)
{
	struct ipt_entry_match *dm;
	struct ipt_match *match;
	int ret;

	dm = (struct ipt_entry_match *)*dstptr;
	match = m->u.kernel.match;
	if (match->compat)
		match->compat(m, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);

	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}
	(*i)++;
	return 0;

err:
	module_put(m->u.kernel.match->me);
	return ret;
}
/*
 * Second compat pass, per entry: expand one rule from compat to kernel
 * layout at *dstptr, fix up its offsets and the table's hook/underflow
 * positions, then verify the target.  On error the references for this
 * entry's target and already-checked matches are released.
 */
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h, j;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	j = 0;
	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom, &j);
	if (ret)
		goto cleanup_matches;
	/* origsize - *size is how much this entry has grown so far. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	if (target->compat)
		target->compat(t, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift any hook entry/underflow located after this entry by the
	 * amount it grew. */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}

	t = ipt_get_target(de);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	ret = -EINVAL;
	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, *size))
			goto err;
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, de, target,
						      t->data, de->comefrom)) {
		duprintf("ip_tables: compat: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		goto err;
	}
	ret = 0;
	return ret;

err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
/*
 * Translate a 32-bit compat rule blob into the kernel's native layout.
 * Two passes: check_compat_entry_size_and_hooks() validates entries and
 * computes the expanded size; compat_copy_entry_from_user() then expands
 * each entry into a freshly allocated native table.  On success *pinfo
 * and *pentry0 are replaced by the new table and the old info is freed.
 *
 * NOTE(review): on the free_newinfo paths (copy-pass failure or
 * mark_source_chains() failure) the match/target module references taken
 * for already-copied entries do not appear to be dropped before the
 * table is freed — verify against later upstream fixes.
 */
static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	i = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &i, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (i != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 i, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	return ret;
out_unlock:
	xt_compat_unlock(AF_INET);
	goto out;
}
/*
 * IPT_SO_SET_REPLACE handler for 32-bit callers: mirrors do_replace()
 * but reads the compat header layout and runs the compat translator
 * before handing off to the shared __do_replace().
 */
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* Translation succeeded, so drop the match/target refs it took. */
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1843 static int
1844 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1845 unsigned int len)
1847 int ret;
1849 if (!capable(CAP_NET_ADMIN))
1850 return -EPERM;
1852 switch (cmd) {
1853 case IPT_SO_SET_REPLACE:
1854 ret = compat_do_replace(user, len);
1855 break;
1857 case IPT_SO_SET_ADD_COUNTERS:
1858 ret = do_add_counters(user, len, 1);
1859 break;
1861 default:
1862 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1863 ret = -EINVAL;
1866 return ret;
/* 32-bit user-space layout of struct ipt_get_entries. */
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
/*
 * Dump @table's rules to user space in compat layout: first copy the
 * shrunk entries, then walk the user buffer again to fill in counters
 * and replace kernel match/target names with their user-visible names.
 */
static int compat_copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu) */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		/* Rewrite each match's name field from the kernel object. */
		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match, u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target, u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}

	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
/*
 * IPT_SO_GET_ENTRIES handler for 32-bit callers: validate the requested
 * size against the table's compat-converted size, then dump the entries
 * via compat_copy_entries_to_user().  The table module reference taken
 * by xt_find_table_lock() is dropped on every exit path.
 */
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
			 get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
1997 static int
1998 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2000 int ret;
2002 switch (cmd) {
2003 case IPT_SO_GET_INFO:
2004 ret = get_info(user, len, 1);
2005 break;
2006 case IPT_SO_GET_ENTRIES:
2007 ret = compat_get_entries(user, len);
2008 break;
2009 default:
2010 duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
2011 ret = -EINVAL;
2013 return ret;
2015 #endif
2017 static int
2018 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2020 int ret;
2022 if (!capable(CAP_NET_ADMIN))
2023 return -EPERM;
2025 switch (cmd) {
2026 case IPT_SO_SET_REPLACE:
2027 ret = do_replace(user, len);
2028 break;
2030 case IPT_SO_SET_ADD_COUNTERS:
2031 ret = do_add_counters(user, len, 0);
2032 break;
2034 default:
2035 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2036 ret = -EINVAL;
2039 return ret;
/*
 * getsockopt() entry point (native ABI): dispatch the supported get
 * commands, including match/target revision queries.  Requires
 * CAP_NET_ADMIN.
 */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() stores the result in ret; the module
		 * is request-loaded if the match/target is not present. */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Register a built-in table: allocate a native xt_table_info, fill it
 * from the initial replacement blob @repl, translate/verify it and hand
 * it to the x_tables core.  Returns 0 on success or a negative errno;
 * on failure the allocated table info is freed here.
 */
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but dont care of preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
/*
 * Unregister a table and release everything it owned: drop the
 * match/target module references held by its entries and free the
 * table info returned by the x_tables core.
 */
void ipt_unregister_table(struct ipt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF is a wildcard that matches any ICMP packet;
 * otherwise the packet's type must equal test_type and its code must
 * fall within [min_code, max_code].  @invert flips the result. */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	int hit = (test_type == 0xFF);

	if (!hit)
		hit = (type == test_type &&
		       code >= min_code && code <= max_code);

	return hit ^ invert;
}
/*
 * Match callback for the built-in "icmp" match: check the packet's ICMP
 * type/code against the rule's configured range.  Non-first fragments
 * never match (the ICMP header is only in the first fragment); a packet
 * too short to carry an ICMP header is hot-dropped.
 */
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop. */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
2187 /* Called when user tries to insert an entry of this type. */
2188 static int
2189 icmp_checkentry(const char *tablename,
2190 const void *info,
2191 const struct xt_match *match,
2192 void *matchinfo,
2193 unsigned int hook_mask)
2195 const struct ipt_icmp *icmpinfo = matchinfo;
2197 /* Must specify no unknown invflags */
2198 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
/* The built-in targets: standard (NULL) and error.
 * The standard target carries a plain int verdict and no target
 * function; verdicts are handled inline by ipt_do_table(). */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compat		= &compat_ipt_standard_fn,
#endif
};
/* Target placed at the end of each chain / on unknown targets; hitting
 * it at runtime indicates a broken ruleset. */
static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
/* Socket-option registration: routes IPT_SO_{SET,GET}_* (and their
 * 32-bit compat equivalents) to the handlers above. */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
/* Built-in "icmp" protocol match. */
static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
/*
 * Module init: register the AF_INET x_tables protocol, the built-in
 * standard/error targets, the icmp match and the setsockopt interface.
 * Each failure path unwinds everything registered before it.
 */
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
/* Module exit: undo ip_tables_init() registrations in reverse order. */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
/* Public API for table providers (iptable_filter, iptable_nat, ...). */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);