timer: fix section mismatch
[linux-2.6/s3c2410-cpufreq.git] / net / ipv4 / netfilter / ip_tables.c
blobb9b189c262080cdfe0612f5b266c5c475939a563
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
19 #include <net/ip.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
30 MODULE_LICENSE("GPL");
31 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32 MODULE_DESCRIPTION("IPv4 packet filter");
34 /*#define DEBUG_IP_FIREWALL*/
35 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36 /*#define DEBUG_IP_FIREWALL_USER*/
38 #ifdef DEBUG_IP_FIREWALL
39 #define dprintf(format, args...) printk(format , ## args)
40 #else
41 #define dprintf(format, args...)
42 #endif
44 #ifdef DEBUG_IP_FIREWALL_USER
45 #define duprintf(format, args...) printk(format , ## args)
46 #else
47 #define duprintf(format, args...)
48 #endif
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define IP_NF_ASSERT(x) \
52 do { \
53 if (!(x)) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
56 } while(0)
57 #else
58 #define IP_NF_ASSERT(x)
59 #endif
61 #if 0
62 /* All the better to debug you with... */
63 #define static
64 #define inline
65 #endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
76 /* Returns whether matches rule or not. */
77 static inline int
78 ip_packet_match(const struct iphdr *ip,
79 const char *indev,
80 const char *outdev,
81 const struct ipt_ip *ipinfo,
82 int isfrag)
84 size_t i;
85 unsigned long ret;
87 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
89 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
90 IPT_INV_SRCIP)
91 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
92 IPT_INV_DSTIP)) {
93 dprintf("Source or dest mismatch.\n");
95 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
96 NIPQUAD(ip->saddr),
97 NIPQUAD(ipinfo->smsk.s_addr),
98 NIPQUAD(ipinfo->src.s_addr),
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
101 NIPQUAD(ip->daddr),
102 NIPQUAD(ipinfo->dmsk.s_addr),
103 NIPQUAD(ipinfo->dst.s_addr),
104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
105 return 0;
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
119 return 0;
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
132 return 0;
135 /* Check specific protocol */
136 if (ipinfo->proto
137 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 dprintf("Packet protocol %hi does not match %hi.%s\n",
139 ip->protocol, ipinfo->proto,
140 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
141 return 0;
144 /* If we have a fragment rule but the packet is not a fragment
145 * then we return zero */
146 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 dprintf("Fragment rule but not fragment.%s\n",
148 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
149 return 0;
152 return 1;
155 static inline bool
156 ip_checkentry(const struct ipt_ip *ip)
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
161 return false;
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
166 return false;
168 return true;
171 static unsigned int
172 ipt_error(struct sk_buff *skb,
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
176 const struct xt_target *target,
177 const void *targinfo)
179 if (net_ratelimit())
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
182 return NF_DROP;
185 static inline
186 bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
190 int offset,
191 bool *hotdrop)
193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop))
196 return true;
197 else
198 return false;
/* Translate a byte offset within the table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
207 /* All zeroes == unconditional rule. */
208 static inline int
209 unconditional(const struct ipt_ip *ip)
211 unsigned int i;
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
215 return 0;
217 return 1;
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* Static strings used when logging TRACEd packets. */
static const char *hooknames[] = {
	[NF_IP_PRE_ROUTING]	= "PREROUTING",
	[NF_IP_LOCAL_IN]	= "INPUT",
	[NF_IP_FORWARD]		= "FORWARD",
	[NF_IP_LOCAL_OUT]	= "OUTPUT",
	[NF_IP_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Iterator callback: walk entries from the hook head towards the matched
   entry `e`, tracking the current chain name and rule number.  Returns
   non-zero (stop) once `e` has been reached. */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				   ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				   : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

/* Emit a "TRACE:" log line naming the table, chain, comment kind and rule
   number of the entry the packet just matched. */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
314 unsigned int
315 ipt_do_table(struct sk_buff *skb,
316 unsigned int hook,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 u_int16_t offset;
323 struct iphdr *ip;
324 u_int16_t datalen;
325 bool hotdrop = false;
326 /* Initializing verdict to NF_DROP keeps gcc happy. */
327 unsigned int verdict = NF_DROP;
328 const char *indev, *outdev;
329 void *table_base;
330 struct ipt_entry *e, *back;
331 struct xt_table_info *private;
333 /* Initialization */
334 ip = ip_hdr(skb);
335 datalen = skb->len - ip->ihl * 4;
336 indev = in ? in->name : nulldevname;
337 outdev = out ? out->name : nulldevname;
338 /* We handle fragments by dealing with the first fragment as
339 * if it was a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask
341 * things we don't know, ie. tcp syn flag or ports). If the
342 * rule is also a fragment-specific rule, non-fragments won't
343 * match it. */
344 offset = ntohs(ip->frag_off) & IP_OFFSET;
346 read_lock_bh(&table->lock);
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 private = table->private;
349 table_base = (void *)private->entries[smp_processor_id()];
350 e = get_entry(table_base, private->hook_entry[hook]);
352 /* For return from builtin chain */
353 back = get_entry(table_base, private->underflow[hook]);
355 do {
356 IP_NF_ASSERT(e);
357 IP_NF_ASSERT(back);
358 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 struct ipt_entry_target *t;
361 if (IPT_MATCH_ITERATE(e, do_match,
362 skb, in, out,
363 offset, &hotdrop) != 0)
364 goto no_match;
366 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
368 t = ipt_get_target(e);
369 IP_NF_ASSERT(t->u.kernel.target);
371 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
374 if (unlikely(skb->nf_trace))
375 trace_packet(skb, hook, in, out,
376 table->name, private, e);
377 #endif
378 /* Standard target? */
379 if (!t->u.kernel.target->target) {
380 int v;
382 v = ((struct ipt_standard_target *)t)->verdict;
383 if (v < 0) {
384 /* Pop from stack? */
385 if (v != IPT_RETURN) {
386 verdict = (unsigned)(-v) - 1;
387 break;
389 e = back;
390 back = get_entry(table_base,
391 back->comefrom);
392 continue;
394 if (table_base + v != (void *)e + e->next_offset
395 && !(e->ip.flags & IPT_F_GOTO)) {
396 /* Save old back ptr in next entry */
397 struct ipt_entry *next
398 = (void *)e + e->next_offset;
399 next->comefrom
400 = (void *)back - table_base;
401 /* set back pointer to next entry */
402 back = next;
405 e = get_entry(table_base, v);
406 } else {
407 /* Targets which reenter must return
408 abs. verdicts */
409 #ifdef CONFIG_NETFILTER_DEBUG
410 ((struct ipt_entry *)table_base)->comefrom
411 = 0xeeeeeeec;
412 #endif
413 verdict = t->u.kernel.target->target(skb,
414 in, out,
415 hook,
416 t->u.kernel.target,
417 t->data);
419 #ifdef CONFIG_NETFILTER_DEBUG
420 if (((struct ipt_entry *)table_base)->comefrom
421 != 0xeeeeeeec
422 && verdict == IPT_CONTINUE) {
423 printk("Target %s reentered!\n",
424 t->u.kernel.target->name);
425 verdict = NF_DROP;
427 ((struct ipt_entry *)table_base)->comefrom
428 = 0x57acc001;
429 #endif
430 /* Target might have changed stuff. */
431 ip = ip_hdr(skb);
432 datalen = skb->len - ip->ihl * 4;
434 if (verdict == IPT_CONTINUE)
435 e = (void *)e + e->next_offset;
436 else
437 /* Verdict */
438 break;
440 } else {
442 no_match:
443 e = (void *)e + e->next_offset;
445 } while (!hotdrop);
447 read_unlock_bh(&table->lock);
449 #ifdef DEBUG_ALLOW_ALL
450 return NF_ACCEPT;
451 #else
452 if (hotdrop)
453 return NF_DROP;
454 else return verdict;
455 #endif
458 /* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
460 static int
461 mark_source_chains(struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0)
464 unsigned int hook;
466 /* No recursion; use packet counter to save back ptrs (reset
467 to 0 as we leave), and comefrom to save source hook bitmask */
468 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
469 unsigned int pos = newinfo->hook_entry[hook];
470 struct ipt_entry *e
471 = (struct ipt_entry *)(entry0 + pos);
473 if (!(valid_hooks & (1 << hook)))
474 continue;
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
479 for (;;) {
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
482 int visited = e->comefrom & (1 << hook);
484 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
487 return 0;
489 e->comefrom
490 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ipt_entry)
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
496 && t->verdict < 0
497 && unconditional(&e->ip)) || visited) {
498 unsigned int oldpos, size;
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
503 t->verdict);
504 return 0;
507 /* Return: backtrack through the last
508 big jump. */
509 do {
510 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
512 if (e->comefrom
513 & (1 << NF_IP_NUMHOOKS)) {
514 duprintf("Back unset "
515 "on hook %u "
516 "rule %u\n",
517 hook, pos);
519 #endif
520 oldpos = pos;
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
525 if (pos == oldpos)
526 goto next;
528 e = (struct ipt_entry *)
529 (entry0 + pos);
530 } while (oldpos == pos + e->next_offset);
532 /* Move along one */
533 size = e->next_offset;
534 e = (struct ipt_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
537 pos += size;
538 } else {
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
543 && newpos >= 0) {
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
548 newpos);
549 return 0;
551 /* This a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
553 pos, newpos);
554 } else {
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ipt_entry *)
559 (entry0 + newpos);
560 e->counters.pcnt = pos;
561 pos = newpos;
564 next:
565 duprintf("Finished chain %u\n", hook);
567 return 1;
570 static inline int
571 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
573 if (i && (*i)-- == 0)
574 return 1;
576 if (m->u.kernel.match->destroy)
577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
578 module_put(m->u.kernel.match->me);
579 return 0;
582 static inline int
583 check_entry(struct ipt_entry *e, const char *name)
585 struct ipt_entry_target *t;
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
589 return -EINVAL;
592 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
593 return -EINVAL;
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
597 return -EINVAL;
599 return 0;
602 static inline int check_match(struct ipt_entry_match *m, const char *name,
603 const struct ipt_ip *ip, unsigned int hookmask,
604 unsigned int *i)
606 struct xt_match *match;
607 int ret;
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
615 hookmask)) {
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
618 ret = -EINVAL;
620 if (!ret)
621 (*i)++;
622 return ret;
625 static inline int
626 find_check_match(struct ipt_entry_match *m,
627 const char *name,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
630 unsigned int *i)
632 struct xt_match *match;
633 int ret;
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
636 m->u.user.revision),
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
642 m->u.kernel.match = match;
644 ret = check_match(m, name, ip, hookmask, i);
645 if (ret)
646 goto err;
648 return 0;
649 err:
650 module_put(m->u.kernel.match->me);
651 return ret;
654 static inline int check_target(struct ipt_entry *e, const char *name)
656 struct ipt_entry_target *t;
657 struct xt_target *target;
658 int ret;
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
666 && !t->u.kernel.target->checkentry(name, e, target,
667 t->data, e->comefrom)) {
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
670 ret = -EINVAL;
672 return ret;
675 static inline int
676 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
677 unsigned int *i)
679 struct ipt_entry_target *t;
680 struct xt_target *target;
681 int ret;
682 unsigned int j;
684 ret = check_entry(e, name);
685 if (ret)
686 return ret;
688 j = 0;
689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
690 e->comefrom, &j);
691 if (ret != 0)
692 goto cleanup_matches;
694 t = ipt_get_target(e);
695 target = try_then_request_module(xt_find_target(AF_INET,
696 t->u.user.name,
697 t->u.user.revision),
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
704 t->u.kernel.target = target;
706 ret = check_target(e, name);
707 if (ret)
708 goto err;
710 (*i)++;
711 return 0;
712 err:
713 module_put(t->u.kernel.target->me);
714 cleanup_matches:
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
716 return ret;
719 static inline int
720 check_entry_size_and_hooks(struct ipt_entry *e,
721 struct xt_table_info *newinfo,
722 unsigned char *base,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
726 unsigned int *i)
728 unsigned int h;
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
733 return -EINVAL;
736 if (e->next_offset
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
739 e, e->next_offset);
740 return -EINVAL;
743 /* Check hooks & underflows */
744 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
751 /* FIXME: underflows must be unconditional, standard verdicts
752 < 0 (not IPT_RETURN). --RR */
754 /* Clear counters and comefrom */
755 e->counters = ((struct xt_counters) { 0, 0 });
756 e->comefrom = 0;
758 (*i)++;
759 return 0;
762 static inline int
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
765 struct ipt_entry_target *t;
767 if (i && (*i)-- == 0)
768 return 1;
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
775 module_put(t->u.kernel.target->me);
776 return 0;
779 /* Checks and translates the user-supplied table segment (held in
780 newinfo) */
781 static int
782 translate_table(const char *name,
783 unsigned int valid_hooks,
784 struct xt_table_info *newinfo,
785 void *entry0,
786 unsigned int size,
787 unsigned int number,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
791 unsigned int i;
792 int ret;
794 newinfo->size = size;
795 newinfo->number = number;
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
803 duprintf("translate_table: size %u\n", newinfo->size);
804 i = 0;
805 /* Walk through entries, checking offsets. */
806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
807 check_entry_size_and_hooks,
808 newinfo,
809 entry0,
810 entry0 + size,
811 hook_entries, underflows, &i);
812 if (ret != 0)
813 return ret;
815 if (i != number) {
816 duprintf("translate_table: %u not %u entries\n",
817 i, number);
818 return -EINVAL;
821 /* Check hooks all assigned */
822 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
825 continue;
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
828 i, hook_entries[i]);
829 return -EINVAL;
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
833 i, underflows[i]);
834 return -EINVAL;
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
839 return -ELOOP;
841 /* Finally, each sanity check must pass */
842 i = 0;
843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
844 find_check_entry, name, size, &i);
846 if (ret != 0) {
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
848 cleanup_entry, &i);
849 return ret;
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
858 return ret;
861 /* Gets counters. */
862 static inline int
863 add_entry_to_counter(const struct ipt_entry *e,
864 struct xt_counters total[],
865 unsigned int *i)
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
869 (*i)++;
870 return 0;
873 static inline int
874 set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
876 unsigned int *i)
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
880 (*i)++;
881 return 0;
884 static void
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
888 unsigned int cpu;
889 unsigned int i;
890 unsigned int curcpu;
892 /* Instead of clearing (by a previous call to memset())
893 * the counters and using adds, we set the counters
894 * with data used by 'current' CPU
895 * We dont care about preemption here.
897 curcpu = raw_smp_processor_id();
899 i = 0;
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
901 t->size,
902 set_entry_to_counter,
903 counters,
904 &i);
906 for_each_possible_cpu(cpu) {
907 if (cpu == curcpu)
908 continue;
909 i = 0;
910 IPT_ENTRY_ITERATE(t->entries[cpu],
911 t->size,
912 add_entry_to_counter,
913 counters,
914 &i);
918 static inline struct xt_counters * alloc_counters(struct xt_table *table)
920 unsigned int countersize;
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
924 /* We need atomic snapshot of counters: rest doesn't change
925 (other than comefrom, which userspace doesn't care
926 about). */
927 countersize = sizeof(struct xt_counters) * private->number;
928 counters = vmalloc_node(countersize, numa_node_id());
930 if (counters == NULL)
931 return ERR_PTR(-ENOMEM);
933 /* First, sum counters... */
934 write_lock_bh(&table->lock);
935 get_counters(private, counters);
936 write_unlock_bh(&table->lock);
938 return counters;
941 static int
942 copy_entries_to_user(unsigned int total_size,
943 struct xt_table *table,
944 void __user *userptr)
946 unsigned int off, num;
947 struct ipt_entry *e;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
950 int ret = 0;
951 void *loc_cpu_entry;
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
957 /* choose the copy that is on our node/cpu, ...
958 * This choice is lazy (because current thread is
959 * allowed to migrate to another cpu)
961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
962 /* ... then copy entire thing ... */
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
964 ret = -EFAULT;
965 goto free_counters;
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
971 unsigned int i;
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
975 e = (struct ipt_entry *)(loc_cpu_entry + off);
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
978 &counters[num],
979 sizeof(counters[num])) != 0) {
980 ret = -EFAULT;
981 goto free_counters;
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
987 m = (void *)e + i;
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
991 u.user.name),
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
994 != 0) {
995 ret = -EFAULT;
996 goto free_counters;
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1003 u.user.name),
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1006 ret = -EFAULT;
1007 goto free_counters;
1011 free_counters:
1012 vfree(counters);
1013 return ret;
#ifdef CONFIG_COMPAT
/* Per-entry size delta between the native and 32-bit-compat layouts,
   kept in a singly-linked list keyed by native entry offset.  Access is
   serialized by the xt compat lock (callers take xt_compat_lock). */
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

/* Record the compat size delta for the entry at `offset`. */
static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

/* Free the whole delta list. */
static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for(tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}

/* Total layout shrinkage accumulated before `offset`: used to fix up
   jump verdicts when converting between native and compat offsets. */
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}

/* Convert a compat STANDARD target verdict to native: positive verdicts
   are jump offsets and must be widened by the accumulated delta. */
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}

/* Convert a native STANDARD target verdict to compat (inverse fixup). */
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/* Iterator callback: add one match's native-vs-compat size difference. */
static inline int
compat_calc_match(struct ipt_entry_match *m, int * size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

/* Compute one entry's compat size delta, record it, and shift any hook
   entry/underflow offsets that lie beyond this entry. */
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

/* Fill `newinfo` with the compat-adjusted size/offsets of `info`. */
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
			compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif
1143 static int get_info(void __user *user, int *len, int compat)
1145 char name[IPT_TABLE_MAXNAMELEN];
1146 struct xt_table *t;
1147 int ret;
1149 if (*len != sizeof(struct ipt_getinfo)) {
1150 duprintf("length %u != %u\n", *len,
1151 (unsigned int)sizeof(struct ipt_getinfo));
1152 return -EINVAL;
1155 if (copy_from_user(name, user, sizeof(name)) != 0)
1156 return -EFAULT;
1158 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1159 #ifdef CONFIG_COMPAT
1160 if (compat)
1161 xt_compat_lock(AF_INET);
1162 #endif
1163 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1164 "iptable_%s", name);
1165 if (t && !IS_ERR(t)) {
1166 struct ipt_getinfo info;
1167 struct xt_table_info *private = t->private;
1169 #ifdef CONFIG_COMPAT
1170 if (compat) {
1171 struct xt_table_info tmp;
1172 ret = compat_table_info(private, &tmp);
1173 compat_flush_offsets();
1174 private = &tmp;
1176 #endif
1177 info.valid_hooks = t->valid_hooks;
1178 memcpy(info.hook_entry, private->hook_entry,
1179 sizeof(info.hook_entry));
1180 memcpy(info.underflow, private->underflow,
1181 sizeof(info.underflow));
1182 info.num_entries = private->number;
1183 info.size = private->size;
1184 strcpy(info.name, name);
1186 if (copy_to_user(user, &info, *len) != 0)
1187 ret = -EFAULT;
1188 else
1189 ret = 0;
1191 xt_table_unlock(t);
1192 module_put(t->me);
1193 } else
1194 ret = t ? PTR_ERR(t) : -ENOENT;
1195 #ifdef CONFIG_COMPAT
1196 if (compat)
1197 xt_compat_unlock(AF_INET);
1198 #endif
1199 return ret;
1202 static int
1203 get_entries(struct ipt_get_entries __user *uptr, int *len)
1205 int ret;
1206 struct ipt_get_entries get;
1207 struct xt_table *t;
1209 if (*len < sizeof(get)) {
1210 duprintf("get_entries: %u < %d\n", *len,
1211 (unsigned int)sizeof(get));
1212 return -EINVAL;
1214 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1215 return -EFAULT;
1216 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1217 duprintf("get_entries: %u != %u\n", *len,
1218 (unsigned int)(sizeof(struct ipt_get_entries) +
1219 get.size));
1220 return -EINVAL;
1223 t = xt_find_table_lock(AF_INET, get.name);
1224 if (t && !IS_ERR(t)) {
1225 struct xt_table_info *private = t->private;
1226 duprintf("t->private->number = %u\n",
1227 private->number);
1228 if (get.size == private->size)
1229 ret = copy_entries_to_user(private->size,
1230 t, uptr->entrytable);
1231 else {
1232 duprintf("get_entries: I've got %u not %u!\n",
1233 private->size,
1234 get.size);
1235 ret = -EINVAL;
1237 module_put(t->me);
1238 xt_table_unlock(t);
1239 } else
1240 ret = t ? PTR_ERR(t) : -ENOENT;
1242 return ret;
/*
 * __do_replace - common table-replacement path shared by the native
 * (do_replace) and compat (compat_do_replace) setsockopt handlers.
 * Swaps @newinfo into the named table, copies the old rule counters
 * back to @counters_ptr and frees the old ruleset.
 * On success, ownership of @newinfo passes to the xt core; on failure
 * the caller still owns (and must free) @newinfo.
 */
1245 static int
1246 __do_replace(const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *newinfo, unsigned int num_counters,
1248 void __user *counters_ptr)
1250 int ret;
1251 struct xt_table *t;
1252 struct xt_table_info *oldinfo;
1253 struct xt_counters *counters;
1254 void *loc_cpu_old_entry;
1256 ret = 0;
/* Scratch buffer for a snapshot of the old table's counters. */
1257 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1258 if (!counters) {
1259 ret = -ENOMEM;
1260 goto out;
1263 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1264 "iptable_%s", name);
1265 if (!t || IS_ERR(t)) {
1266 ret = t ? PTR_ERR(t) : -ENOENT;
1267 goto free_newinfo_counters_untrans;
1270 /* You lied! */
1271 if (valid_hooks != t->valid_hooks) {
1272 duprintf("Valid hook crap: %08X vs %08X\n",
1273 valid_hooks, t->valid_hooks);
1274 ret = -EINVAL;
1275 goto put_module;
/* Atomically swap in the new ruleset; fails if counts mismatch. */
1278 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1279 if (!oldinfo)
1280 goto put_module;
1282 /* Update module usage count based on number of rules */
1283 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1284 oldinfo->number, oldinfo->initial_entries, newinfo->number);
/*
 * Invariant: the table module holds one extra reference while the
 * rule count exceeds its initial (built-in) entries.  The two
 * conditionals below drop one or two references accordingly as the
 * rule count crosses that threshold (plus the lookup reference).
 */
1285 if ((oldinfo->number > oldinfo->initial_entries) ||
1286 (newinfo->number <= oldinfo->initial_entries))
1287 module_put(t->me);
1288 if ((oldinfo->number > oldinfo->initial_entries) &&
1289 (newinfo->number <= oldinfo->initial_entries))
1290 module_put(t->me);
1292 /* Get the old counters. */
1293 get_counters(oldinfo, counters);
1294 /* Decrease module usage counts and free resource */
1295 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1296 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1297 xt_free_table_info(oldinfo);
/* Replacement already succeeded; a copy failure only reports -EFAULT. */
1298 if (copy_to_user(counters_ptr, counters,
1299 sizeof(struct xt_counters) * num_counters) != 0)
1300 ret = -EFAULT;
1301 vfree(counters);
1302 xt_table_unlock(t);
1303 return ret;
1305 put_module:
1306 module_put(t->me);
1307 xt_table_unlock(t);
1308 free_newinfo_counters_untrans:
1309 vfree(counters);
1310 out:
1311 return ret;
/*
 * do_replace - IPT_SO_SET_REPLACE handler (native ABI).
 * Copies the replacement blob from userspace, sanity-checks and
 * translates it, then hands off to __do_replace().
 */
1314 static int
1315 do_replace(void __user *user, unsigned int len)
1317 int ret;
1318 struct ipt_replace tmp;
1319 struct xt_table_info *newinfo;
1320 void *loc_cpu_entry;
1322 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1323 return -EFAULT;
1325 /* Hack: Causes ipchains to give correct error msg --RR */
1326 if (len != sizeof(tmp) + tmp.size)
1327 return -ENOPROTOOPT;
1329 /* overflow check */
/* Guard the per-cpu allocation in xt_alloc_table_info() below. */
1330 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1331 SMP_CACHE_BYTES)
1332 return -ENOMEM;
1333 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1334 return -ENOMEM;
1336 newinfo = xt_alloc_table_info(tmp.size);
1337 if (!newinfo)
1338 return -ENOMEM;
1340 /* choose the copy that is our node/cpu */
1341 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1342 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1343 tmp.size) != 0) {
1344 ret = -EFAULT;
1345 goto free_newinfo;
/* Validate entries/hooks and take match/target module references. */
1348 ret = translate_table(tmp.name, tmp.valid_hooks,
1349 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1350 tmp.hook_entry, tmp.underflow);
1351 if (ret != 0)
1352 goto free_newinfo;
1354 duprintf("ip_tables: Translated table\n");
1356 ret = __do_replace(tmp.name, tmp.valid_hooks,
1357 newinfo, tmp.num_counters,
1358 tmp.counters);
1359 if (ret)
1360 goto free_newinfo_untrans;
1361 return 0;
/* Drop the module references translate_table took, then free. */
1363 free_newinfo_untrans:
1364 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1365 free_newinfo:
1366 xt_free_table_info(newinfo);
1367 return ret;
1370 /* We're lazy, and add to the first CPU; overflow works its fey magic
1371 * and everything is OK. */
/*
 * IPT_ENTRY_ITERATE callback: folds addme[*i] into entry @e's counters
 * and advances *i.  Always returns 0 so iteration visits every entry.
 */
1372 static inline int
1373 add_counter_to_entry(struct ipt_entry *e,
1374 const struct xt_counters addme[],
1375 unsigned int *i)
1377 #if 0
1378 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1380 (long unsigned int)e->counters.pcnt,
1381 (long unsigned int)e->counters.bcnt,
1382 (long unsigned int)addme[*i].pcnt,
1383 (long unsigned int)addme[*i].bcnt);
1384 #endif
1386 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1388 (*i)++;
1389 return 0;
/*
 * do_add_counters - IPT_SO_SET_ADD_COUNTERS handler.
 * Adds a userspace-supplied array of byte/packet counters to the
 * named table's entries.  @compat selects the 32-bit header layout
 * when invoked from the compat setsockopt path.
 */
1392 static int
1393 do_add_counters(void __user *user, unsigned int len, int compat)
1395 unsigned int i;
1396 struct xt_counters_info tmp;
1397 struct xt_counters *paddc;
1398 unsigned int num_counters;
1399 char *name;
1400 int size;
1401 void *ptmp;
1402 struct xt_table *t;
1403 struct xt_table_info *private;
1404 int ret = 0;
1405 void *loc_cpu_entry;
1406 #ifdef CONFIG_COMPAT
1407 struct compat_xt_counters_info compat_tmp;
/* Pick which header flavor (and size) to read from userspace. */
1409 if (compat) {
1410 ptmp = &compat_tmp;
1411 size = sizeof(struct compat_xt_counters_info);
1412 } else
1413 #endif
1415 ptmp = &tmp;
1416 size = sizeof(struct xt_counters_info);
1419 if (copy_from_user(ptmp, user, size) != 0)
1420 return -EFAULT;
1422 #ifdef CONFIG_COMPAT
1423 if (compat) {
1424 num_counters = compat_tmp.num_counters;
1425 name = compat_tmp.name;
1426 } else
1427 #endif
1429 num_counters = tmp.num_counters;
1430 name = tmp.name;
/* The counter payload itself is layout-identical in both ABIs. */
1433 if (len != size + num_counters * sizeof(struct xt_counters))
1434 return -EINVAL;
1436 paddc = vmalloc_node(len - size, numa_node_id());
1437 if (!paddc)
1438 return -ENOMEM;
1440 if (copy_from_user(paddc, user + size, len - size) != 0) {
1441 ret = -EFAULT;
1442 goto free;
1445 t = xt_find_table_lock(AF_INET, name);
1446 if (!t || IS_ERR(t)) {
1447 ret = t ? PTR_ERR(t) : -ENOENT;
1448 goto free;
/* Exclude packet processing while we touch the live counters. */
1451 write_lock_bh(&t->lock);
1452 private = t->private;
1453 if (private->number != num_counters) {
1454 ret = -EINVAL;
1455 goto unlock_up_free;
1458 i = 0;
1459 /* Choose the copy that is on our node */
1460 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1461 IPT_ENTRY_ITERATE(loc_cpu_entry,
1462 private->size,
1463 add_counter_to_entry,
1464 paddc,
1465 &i);
1466 unlock_up_free:
1467 write_unlock_bh(&t->lock);
1468 xt_table_unlock(t);
1469 module_put(t->me);
1470 free:
1471 vfree(paddc);
1473 return ret;
1476 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ipt_replace.  Field order and
 * widths mirror the native struct but with compat (32-bit) pointer
 * and entry types; do not reorder or resize — this is kernel ABI.
 */
1477 struct compat_ipt_replace {
1478 char name[IPT_TABLE_MAXNAMELEN];
1479 u32 valid_hooks;
1480 u32 num_entries;
1481 u32 size;
1482 u32 hook_entry[NF_IP_NUMHOOKS];
1483 u32 underflow[NF_IP_NUMHOOKS];
1484 u32 num_counters;
1485 compat_uptr_t counters; /* struct ipt_counters * */
1486 struct compat_ipt_entry entries[0];
/* Thin IPT_MATCH_ITERATE-compatible wrapper around the xt helper. */
1489 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1490 void __user **dstptr, compat_uint_t *size)
1492 return xt_compat_match_to_user(m, dstptr, size);
/*
 * compat_copy_entry_to_user - convert one native entry to the 32-bit
 * layout at *dstptr, substituting the aggregated counters[*i].
 * *size shrinks by the native/compat size delta; target_offset and
 * next_offset are re-derived from that delta so the compat entry
 * remains internally consistent.
 */
1495 static int
1496 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1497 compat_uint_t *size, struct xt_counters *counters,
1498 unsigned int *i)
1500 struct ipt_entry_target *t;
1501 struct compat_ipt_entry __user *ce;
1502 u_int16_t target_offset, next_offset;
1503 compat_uint_t origsize;
1504 int ret;
1506 ret = -EFAULT;
1507 origsize = *size;
1508 ce = (struct compat_ipt_entry __user *)*dstptr;
1509 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1510 goto out;
/* Overwrite the per-cpu counters with the summed snapshot. */
1512 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1513 goto out;
1515 *dstptr += sizeof(struct compat_ipt_entry);
1516 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1517 target_offset = e->target_offset - (origsize - *size);
1518 if (ret)
1519 goto out;
1520 t = ipt_get_target(e);
1521 ret = xt_compat_target_to_user(t, dstptr, size);
1522 if (ret)
1523 goto out;
1524 ret = -EFAULT;
1525 next_offset = e->next_offset - (origsize - *size);
/* Patch the offsets we computed into the already-copied header. */
1526 if (put_user(target_offset, &ce->target_offset))
1527 goto out;
1528 if (put_user(next_offset, &ce->next_offset))
1529 goto out;
1531 (*i)++;
1532 return 0;
1533 out:
1534 return ret;
/*
 * compat_find_calc_match - resolve one match extension by name (auto-
 * loading its module if needed), record it in m->u.kernel.match and
 * accumulate the native/compat size delta into *size.  *i counts the
 * matches successfully resolved so failures can be unwound.
 */
1537 static inline int
1538 compat_find_calc_match(struct ipt_entry_match *m,
1539 const char *name,
1540 const struct ipt_ip *ip,
1541 unsigned int hookmask,
1542 int *size, int *i)
1544 struct xt_match *match;
1546 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1547 m->u.user.revision),
1548 "ipt_%s", m->u.user.name);
1549 if (IS_ERR(match) || !match) {
1550 duprintf("compat_check_calc_match: `%s' not found\n",
1551 m->u.user.name);
1552 return match ? PTR_ERR(match) : -ENOENT;
1554 m->u.kernel.match = match;
1555 *size += xt_compat_match_offset(match);
1557 (*i)++;
1558 return 0;
/*
 * Drop the module reference taken by compat_find_calc_match.  When
 * @i is non-NULL only the first *i matches are released (countdown
 * stops iteration by returning non-zero); NULL releases them all.
 */
1561 static inline int
1562 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1564 if (i && (*i)-- == 0)
1565 return 1;
1567 module_put(m->u.kernel.match->me);
1568 return 0;
/*
 * Release all match and target module references held by entry @e.
 * Same *i countdown convention as compat_release_match: non-NULL @i
 * limits the unwind to the first *i entries.
 */
1571 static inline int
1572 compat_release_entry(struct ipt_entry *e, unsigned int *i)
1574 struct ipt_entry_target *t;
1576 if (i && (*i)-- == 0)
1577 return 1;
1579 /* Cleanup all matches */
1580 IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1581 t = ipt_get_target(e);
1582 module_put(t->u.kernel.target->me);
1583 return 0;
/*
 * check_compat_entry_size_and_hooks - pass-one validation of a single
 * 32-bit entry: bounds/alignment checks, resolution of its matches
 * and target (taking module references), registration of the compat
 * offset delta, and recording of hook entry/underflow positions.
 * On error all references taken so far for this entry are dropped.
 */
1586 static inline int
1587 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1588 struct xt_table_info *newinfo,
1589 unsigned int *size,
1590 unsigned char *base,
1591 unsigned char *limit,
1592 unsigned int *hook_entries,
1593 unsigned int *underflows,
1594 unsigned int *i,
1595 const char *name)
1597 struct ipt_entry_target *t;
1598 struct xt_target *target;
1599 unsigned int entry_offset;
1600 int ret, off, h, j;
1602 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be aligned and leave room for at least its header. */
1603 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1604 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1605 duprintf("Bad offset %p, limit = %p\n", e, limit);
1606 return -EINVAL;
1609 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1610 sizeof(struct compat_xt_entry_target)) {
1611 duprintf("checking: element %p size %u\n",
1612 e, e->next_offset);
1613 return -EINVAL;
1616 ret = check_entry(e, name);
1617 if (ret)
1618 return ret;
1620 off = 0;
1621 entry_offset = (void *)e - (void *)base;
1622 j = 0;
/* Resolve matches; j counts them for error unwinding below. */
1623 ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
1624 e->comefrom, &off, &j);
1625 if (ret != 0)
1626 goto release_matches;
1628 t = ipt_get_target(e);
1629 target = try_then_request_module(xt_find_target(AF_INET,
1630 t->u.user.name,
1631 t->u.user.revision),
1632 "ipt_%s", t->u.user.name);
1633 if (IS_ERR(target) || !target) {
1634 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1635 t->u.user.name);
1636 ret = target ? PTR_ERR(target) : -ENOENT;
1637 goto release_matches;
1639 t->u.kernel.target = target;
/* Remember how much this entry grows when converted to native. */
1641 off += xt_compat_target_offset(target);
1642 *size += off;
1643 ret = compat_add_offset(entry_offset, off);
1644 if (ret)
1645 goto out;
1647 /* Check hooks & underflows */
1648 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1649 if ((unsigned char *)e - base == hook_entries[h])
1650 newinfo->hook_entry[h] = hook_entries[h];
1651 if ((unsigned char *)e - base == underflows[h])
1652 newinfo->underflow[h] = underflows[h];
1655 /* Clear counters and comefrom */
1656 e->counters = ((struct ipt_counters) { 0, 0 });
1657 e->comefrom = 0;
1659 (*i)++;
1660 return 0;
1662 out:
1663 module_put(t->u.kernel.target->me);
1664 release_matches:
1665 IPT_MATCH_ITERATE(e, compat_release_match, &j);
1666 return ret;
/* Iterator wrapper: convert one match from compat to native layout. */
1669 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1670 void **dstptr, compat_uint_t *size, const char *name,
1671 const struct ipt_ip *ip, unsigned int hookmask)
1673 xt_compat_match_from_user(m, dstptr, size);
1674 return 0;
/*
 * compat_copy_entry_from_user - pass-two conversion of one entry from
 * the 32-bit layout in @e to the native layout at *dstptr.  Offsets
 * inside the entry, plus any hook entry/underflow positions that lie
 * beyond it, are adjusted by the accumulated size delta.
 */
1677 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1678 unsigned int *size, const char *name,
1679 struct xt_table_info *newinfo, unsigned char *base)
1681 struct ipt_entry_target *t;
1682 struct xt_target *target;
1683 struct ipt_entry *de;
1684 unsigned int origsize;
1685 int ret, h;
1687 ret = 0;
1688 origsize = *size;
1689 de = (struct ipt_entry *)*dstptr;
1690 memcpy(de, e, sizeof(struct ipt_entry));
/* Source advances by the compat header size, dest by the native one. */
1692 *dstptr += sizeof(struct compat_ipt_entry);
1693 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1694 name, &de->ip, de->comefrom);
1695 if (ret)
1696 return ret;
1697 de->target_offset = e->target_offset - (origsize - *size);
1698 t = ipt_get_target(e);
1699 target = t->u.kernel.target;
1700 xt_compat_target_from_user(t, dstptr, size);
1702 de->next_offset = e->next_offset - (origsize - *size);
/* Shift recorded hook positions that sit past this entry. */
1703 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1704 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1705 newinfo->hook_entry[h] -= origsize - *size;
1706 if ((unsigned char *)de - base < newinfo->underflow[h])
1707 newinfo->underflow[h] -= origsize - *size;
1709 return ret;
/*
 * compat_check_entry - pass-three checkentry validation of a converted
 * (now native-layout) entry: run each match's and the target's
 * checkentry hooks.  On failure, unwind only the matches already
 * validated (j-count), mirroring the native check path.
 */
1712 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1713 unsigned int *i)
1715 int j, ret;
1717 j = 0;
1718 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
1719 if (ret)
1720 goto cleanup_matches;
1722 ret = check_target(e, name);
1723 if (ret)
1724 goto cleanup_matches;
1726 (*i)++;
1727 return 0;
1729 cleanup_matches:
1730 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1731 return ret;
/*
 * translate_compat_table - convert a full 32-bit ruleset into native
 * layout.  Pass 1 (under the compat lock) validates every compat
 * entry and sizes the native table; pass 2 copies/converts entries
 * into a freshly allocated table; pass 3 runs checkentry on the
 * result.  On success *pinfo/*pentry0 are replaced with the native
 * table and the compat originals are freed.
 */
1734 static int
1735 translate_compat_table(const char *name,
1736 unsigned int valid_hooks,
1737 struct xt_table_info **pinfo,
1738 void **pentry0,
1739 unsigned int total_size,
1740 unsigned int number,
1741 unsigned int *hook_entries,
1742 unsigned int *underflows)
1744 unsigned int i, j;
1745 struct xt_table_info *newinfo, *info;
1746 void *pos, *entry0, *entry1;
1747 unsigned int size;
1748 int ret;
1750 info = *pinfo;
1751 entry0 = *pentry0;
1752 size = total_size;
1753 info->number = number;
1755 /* Init all hooks to impossible value. */
1756 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1757 info->hook_entry[i] = 0xFFFFFFFF;
1758 info->underflow[i] = 0xFFFFFFFF;
1761 duprintf("translate_compat_table: size %u\n", info->size);
1762 j = 0;
/* Compat lock guards the shared offset-delta table used below. */
1763 xt_compat_lock(AF_INET);
1764 /* Walk through entries, checking offsets. */
1765 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1766 check_compat_entry_size_and_hooks,
1767 info, &size, entry0,
1768 entry0 + total_size,
1769 hook_entries, underflows, &j, name);
1770 if (ret != 0)
1771 goto out_unlock;
1773 ret = -EINVAL;
1774 if (j != number) {
1775 duprintf("translate_compat_table: %u not %u entries\n",
1776 j, number);
1777 goto out_unlock;
1780 /* Check hooks all assigned */
1781 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1782 /* Only hooks which are valid */
1783 if (!(valid_hooks & (1 << i)))
1784 continue;
1785 if (info->hook_entry[i] == 0xFFFFFFFF) {
1786 duprintf("Invalid hook entry %u %u\n",
1787 i, hook_entries[i]);
1788 goto out_unlock;
1790 if (info->underflow[i] == 0xFFFFFFFF) {
1791 duprintf("Invalid underflow %u %u\n",
1792 i, underflows[i]);
1793 goto out_unlock;
1797 ret = -ENOMEM;
/* 'size' now includes every entry's compat->native growth. */
1798 newinfo = xt_alloc_table_info(size);
1799 if (!newinfo)
1800 goto out_unlock;
1802 newinfo->number = number;
1803 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1804 newinfo->hook_entry[i] = info->hook_entry[i];
1805 newinfo->underflow[i] = info->underflow[i];
1807 entry1 = newinfo->entries[raw_smp_processor_id()];
1808 pos = entry1;
1809 size = total_size;
1810 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1811 compat_copy_entry_from_user, &pos, &size,
1812 name, newinfo, entry1);
1813 compat_flush_offsets();
1814 xt_compat_unlock(AF_INET);
1815 if (ret)
1816 goto free_newinfo;
1818 ret = -ELOOP;
1819 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1820 goto free_newinfo;
1822 i = 0;
1823 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1824 name, &i);
1825 if (ret) {
/* Release refs for unchecked entries, clean up checked ones. */
1826 j -= i;
1827 IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
1828 compat_release_entry, &j);
1829 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1830 xt_free_table_info(newinfo);
1831 return ret;
1834 /* And one copy for every other CPU */
1835 for_each_possible_cpu(i)
1836 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1837 memcpy(newinfo->entries[i], entry1, newinfo->size);
1839 *pinfo = newinfo;
1840 *pentry0 = entry1;
1841 xt_free_table_info(info);
1842 return 0;
1844 free_newinfo:
1845 xt_free_table_info(newinfo);
1846 out:
1847 IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1848 return ret;
1849 out_unlock:
1850 compat_flush_offsets();
1851 xt_compat_unlock(AF_INET);
1852 goto out;
/*
 * compat_do_replace - IPT_SO_SET_REPLACE handler for 32-bit userspace
 * on a 64-bit kernel; mirrors do_replace() but translates the ruleset
 * via translate_compat_table() first.
 */
1855 static int
1856 compat_do_replace(void __user *user, unsigned int len)
1858 int ret;
1859 struct compat_ipt_replace tmp;
1860 struct xt_table_info *newinfo;
1861 void *loc_cpu_entry;
1863 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1864 return -EFAULT;
1866 /* Hack: Causes ipchains to give correct error msg --RR */
1867 if (len != sizeof(tmp) + tmp.size)
1868 return -ENOPROTOOPT;
1870 /* overflow check */
1871 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1872 SMP_CACHE_BYTES)
1873 return -ENOMEM;
1874 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1875 return -ENOMEM;
1877 newinfo = xt_alloc_table_info(tmp.size);
1878 if (!newinfo)
1879 return -ENOMEM;
1881 /* choose the copy that is our node/cpu */
1882 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1883 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1884 tmp.size) != 0) {
1885 ret = -EFAULT;
1886 goto free_newinfo;
/* May replace newinfo/loc_cpu_entry with a larger native table. */
1889 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1890 &newinfo, &loc_cpu_entry, tmp.size,
1891 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1892 if (ret != 0)
1893 goto free_newinfo;
1895 duprintf("compat_do_replace: Translated table\n");
1897 ret = __do_replace(tmp.name, tmp.valid_hooks,
1898 newinfo, tmp.num_counters,
1899 compat_ptr(tmp.counters));
1900 if (ret)
1901 goto free_newinfo_untrans;
1902 return 0;
1904 free_newinfo_untrans:
1905 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1906 free_newinfo:
1907 xt_free_table_info(newinfo);
1908 return ret;
/*
 * compat_do_ipt_set_ctl - compat setsockopt dispatcher.
 * Requires CAP_NET_ADMIN; routes to the compat replace / add-counters
 * implementations.
 */
1911 static int
1912 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1913 unsigned int len)
1915 int ret;
1917 if (!capable(CAP_NET_ADMIN))
1918 return -EPERM;
1920 switch (cmd) {
1921 case IPT_SO_SET_REPLACE:
1922 ret = compat_do_replace(user, len);
1923 break;
1925 case IPT_SO_SET_ADD_COUNTERS:
1926 ret = do_add_counters(user, len, 1);
1927 break;
1929 default:
1930 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1931 ret = -EINVAL;
1934 return ret;
/* 32-bit layout of struct ipt_get_entries — kernel ABI, do not alter. */
1937 struct compat_ipt_get_entries
1939 char name[IPT_TABLE_MAXNAMELEN];
1940 compat_uint_t size;
1941 struct compat_ipt_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump the table's entries to a 32-bit
 * userspace buffer, converting each entry via
 * compat_copy_entry_to_user and substituting aggregated counters.
 */
1944 static int compat_copy_entries_to_user(unsigned int total_size,
1945 struct xt_table *table, void __user *userptr)
1947 struct xt_counters *counters;
1948 struct xt_table_info *private = table->private;
1949 void __user *pos;
1950 unsigned int size;
1951 int ret = 0;
1952 void *loc_cpu_entry;
1953 unsigned int i = 0;
/* Snapshot of counters summed across all CPUs. */
1955 counters = alloc_counters(table);
1956 if (IS_ERR(counters))
1957 return PTR_ERR(counters);
1959 /* choose the copy that is on our node/cpu, ...
1960 * This choice is lazy (because current thread is
1961 * allowed to migrate to another cpu)
1963 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1964 pos = userptr;
1965 size = total_size;
1966 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1967 compat_copy_entry_to_user,
1968 &pos, &size, counters, &i);
1970 vfree(counters);
1971 return ret;
/*
 * compat_get_entries - IPT_SO_GET_ENTRIES handler for 32-bit
 * userspace.  Validates the request length against the compat-size
 * of the live table before dumping it.
 */
1974 static int
1975 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1977 int ret;
1978 struct compat_ipt_get_entries get;
1979 struct xt_table *t;
1982 if (*len < sizeof(get)) {
1983 duprintf("compat_get_entries: %u < %u\n",
1984 *len, (unsigned int)sizeof(get));
1985 return -EINVAL;
1988 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1989 return -EFAULT;
1991 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1992 duprintf("compat_get_entries: %u != %u\n", *len,
1993 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1994 get.size));
1995 return -EINVAL;
/* Hold the compat lock across offset computation and the dump. */
1998 xt_compat_lock(AF_INET);
1999 t = xt_find_table_lock(AF_INET, get.name);
2000 if (t && !IS_ERR(t)) {
2001 struct xt_table_info *private = t->private;
2002 struct xt_table_info info;
2003 duprintf("t->private->number = %u\n",
2004 private->number);
/* Compute what the table's size would be in the compat layout. */
2005 ret = compat_table_info(private, &info);
2006 if (!ret && get.size == info.size) {
2007 ret = compat_copy_entries_to_user(private->size,
2008 t, uptr->entrytable);
2009 } else if (!ret) {
2010 duprintf("compat_get_entries: I've got %u not %u!\n",
2011 private->size,
2012 get.size);
2013 ret = -EINVAL;
2015 compat_flush_offsets();
2016 module_put(t->me);
2017 xt_table_unlock(t);
2018 } else
2019 ret = t ? PTR_ERR(t) : -ENOENT;
2021 xt_compat_unlock(AF_INET);
2022 return ret;
/* Forward declaration: the compat get handler below falls back to it. */
2025 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ipt_get_ctl - compat getsockopt dispatcher.
 * INFO and ENTRIES need layout conversion; everything else (the
 * revision queries) is layout-identical and delegates to the native
 * handler.
 */
2027 static int
2028 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2030 int ret;
2032 if (!capable(CAP_NET_ADMIN))
2033 return -EPERM;
2035 switch (cmd) {
2036 case IPT_SO_GET_INFO:
2037 ret = get_info(user, len, 1);
2038 break;
2039 case IPT_SO_GET_ENTRIES:
2040 ret = compat_get_entries(user, len);
2041 break;
2042 default:
2043 ret = do_ipt_get_ctl(sk, cmd, user, len);
2045 return ret;
2047 #endif
/*
 * do_ipt_set_ctl - native setsockopt dispatcher.
 * Requires CAP_NET_ADMIN; routes REPLACE and ADD_COUNTERS requests.
 */
2049 static int
2050 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2052 int ret;
2054 if (!capable(CAP_NET_ADMIN))
2055 return -EPERM;
2057 switch (cmd) {
2058 case IPT_SO_SET_REPLACE:
2059 ret = do_replace(user, len);
2060 break;
2062 case IPT_SO_SET_ADD_COUNTERS:
2063 ret = do_add_counters(user, len, 0);
2064 break;
2066 default:
2067 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2068 ret = -EINVAL;
2071 return ret;
/*
 * do_ipt_get_ctl - native getsockopt dispatcher: table info, table
 * entries, and match/target revision queries.
 */
2074 static int
2075 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2077 int ret;
2079 if (!capable(CAP_NET_ADMIN))
2080 return -EPERM;
2082 switch (cmd) {
2083 case IPT_SO_GET_INFO:
2084 ret = get_info(user, len, 0);
2085 break;
2087 case IPT_SO_GET_ENTRIES:
2088 ret = get_entries(user, len);
2089 break;
2091 case IPT_SO_GET_REVISION_MATCH:
2092 case IPT_SO_GET_REVISION_TARGET: {
2093 struct ipt_get_revision rev;
2094 int target;
2096 if (*len != sizeof(rev)) {
2097 ret = -EINVAL;
2098 break;
2100 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2101 ret = -EFAULT;
2102 break;
2105 if (cmd == IPT_SO_GET_REVISION_TARGET)
2106 target = 1;
2107 else
2108 target = 0;
/* xt_find_revision() fills 'ret' as a side effect of the lookup. */
2110 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2111 rev.revision,
2112 target, &ret),
2113 "ipt_%s", rev.name);
2114 break;
2117 default:
2118 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2119 ret = -EINVAL;
2122 return ret;
/*
 * ipt_register_table - register a built-in table (filter/nat/mangle/
 * raw modules call this).  Translates the template ruleset in @repl
 * and hands it to the xt core.  Returns 0 or a negative errno;
 * @newinfo is freed locally on any failure.
 */
2125 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2127 int ret;
2128 struct xt_table_info *newinfo;
/* Empty placeholder xt_replace_table swaps out on first registration. */
2129 static struct xt_table_info bootstrap
2130 = { 0, 0, 0, { 0 }, { 0 }, { } };
2131 void *loc_cpu_entry;
2133 newinfo = xt_alloc_table_info(repl->size);
2134 if (!newinfo)
2135 return -ENOMEM;
2137 /* choose the copy on our node/cpu
2138 * but dont care of preemption
2140 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2141 memcpy(loc_cpu_entry, repl->entries, repl->size);
2143 ret = translate_table(table->name, table->valid_hooks,
2144 newinfo, loc_cpu_entry, repl->size,
2145 repl->num_entries,
2146 repl->hook_entry,
2147 repl->underflow);
2148 if (ret != 0) {
2149 xt_free_table_info(newinfo);
2150 return ret;
2153 ret = xt_register_table(table, &bootstrap, newinfo);
2154 if (ret != 0) {
2155 xt_free_table_info(newinfo);
2156 return ret;
2159 return 0;
/*
 * ipt_unregister_table - counterpart of ipt_register_table: detach the
 * table from the xt core, release every entry's match/target module
 * references and free the per-cpu ruleset.
 */
2162 void ipt_unregister_table(struct xt_table *table)
2164 struct xt_table_info *private;
2165 void *loc_cpu_entry;
2167 private = xt_unregister_table(table);
2169 /* Decrease module usage counts and free resources */
2170 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2171 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2172 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * test_type == 0xFF is the wildcard that matches any ICMP type;
 * otherwise the type must match exactly and the code must fall in
 * [min_code, max_code].  The result is XORed with 'invert'. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool hit;

	if (test_type == 0xFF)
		hit = true;
	else
		hit = type == test_type &&
		      code >= min_code && code <= max_code;

	return hit ^ invert;
}
/*
 * icmp_match - xt_match ->match hook for the built-in "icmp" match.
 * Pulls the ICMP header from the skb and tests it against the rule's
 * type/code range.  Returns true on match.
 */
2185 static bool
2186 icmp_match(const struct sk_buff *skb,
2187 const struct net_device *in,
2188 const struct net_device *out,
2189 const struct xt_match *match,
2190 const void *matchinfo,
2191 int offset,
2192 unsigned int protoff,
2193 bool *hotdrop)
2195 struct icmphdr _icmph, *ic;
2196 const struct ipt_icmp *icmpinfo = matchinfo;
2198 /* Must not be a fragment. */
2199 if (offset)
2200 return false;
2202 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2203 if (ic == NULL) {
2204 /* We've been asked to examine this packet, and we
2205 * can't. Hence, no choice but to drop.
2207 duprintf("Dropping evil ICMP tinygram.\n");
2208 *hotdrop = true;
2209 return false;
2212 return icmp_type_code_match(icmpinfo->type,
2213 icmpinfo->code[0],
2214 icmpinfo->code[1],
2215 ic->type, ic->code,
2216 !!(icmpinfo->invflags&IPT_ICMP_INV));
2219 /* Called when user tries to insert an entry of this type. */
/* Returns true iff the rule's inversion flags are all recognized. */
2220 static bool
2221 icmp_checkentry(const char *tablename,
2222 const void *info,
2223 const struct xt_match *match,
2224 void *matchinfo,
2225 unsigned int hook_mask)
2227 const struct ipt_icmp *icmpinfo = matchinfo;
2229 /* Must specify no unknown invflags */
2230 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2233 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is the verdict int itself;
 * no ->target function — handled inline by ipt_do_table. */
2234 static struct xt_target ipt_standard_target __read_mostly = {
2235 .name = IPT_STANDARD_TARGET,
2236 .targetsize = sizeof(int),
2237 .family = AF_INET,
2238 #ifdef CONFIG_COMPAT
2239 .compatsize = sizeof(compat_int_t),
2240 .compat_from_user = compat_standard_from_user,
2241 .compat_to_user = compat_standard_to_user,
2242 #endif
/* ERROR target: marks chain heads / table end; hitting it at runtime
 * is a bug, which ipt_error() reports. */
2245 static struct xt_target ipt_error_target __read_mostly = {
2246 .name = IPT_ERROR_TARGET,
2247 .target = ipt_error,
2248 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2249 .family = AF_INET,
/* get/setsockopt registration: wires the dispatchers above into the
 * IPT_BASE_CTL..IPT_SO_{SET,GET}_MAX option ranges on PF_INET. */
2252 static struct nf_sockopt_ops ipt_sockopts = {
2253 .pf = PF_INET,
2254 .set_optmin = IPT_BASE_CTL,
2255 .set_optmax = IPT_SO_SET_MAX+1,
2256 .set = do_ipt_set_ctl,
2257 #ifdef CONFIG_COMPAT
2258 .compat_set = compat_do_ipt_set_ctl,
2259 #endif
2260 .get_optmin = IPT_BASE_CTL,
2261 .get_optmax = IPT_SO_GET_MAX+1,
2262 .get = do_ipt_get_ctl,
2263 #ifdef CONFIG_COMPAT
2264 .compat_get = compat_do_ipt_get_ctl,
2265 #endif
2266 .owner = THIS_MODULE,
/* The built-in "icmp" match registered at module init. */
2269 static struct xt_match icmp_matchstruct __read_mostly = {
2270 .name = "icmp",
2271 .match = icmp_match,
2272 .matchsize = sizeof(struct ipt_icmp),
2273 .proto = IPPROTO_ICMP,
2274 .family = AF_INET,
2275 .checkentry = icmp_checkentry,
/*
 * ip_tables_init - module init: register the AF_INET xt protocol,
 * built-in targets/match, and the sockopt interface.  Unwinds in
 * reverse order via the numbered error labels on any failure.
 */
2279 static int __init ip_tables_init(void)
2280 int ret;
2282 ret = xt_proto_init(AF_INET);
2283 if (ret < 0)
2284 goto err1;
2286 /* Noone else will be downing sem now, so we won't sleep */
2287 ret = xt_register_target(&ipt_standard_target);
2288 if (ret < 0)
2289 goto err2;
2290 ret = xt_register_target(&ipt_error_target);
2291 if (ret < 0)
2292 goto err3;
2293 ret = xt_register_match(&icmp_matchstruct);
2294 if (ret < 0)
2295 goto err4;
2297 /* Register setsockopt */
2298 ret = nf_register_sockopt(&ipt_sockopts);
2299 if (ret < 0)
2300 goto err5;
2302 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2303 return 0;
2305 err5:
2306 xt_unregister_match(&icmp_matchstruct);
2307 err4:
2308 xt_unregister_target(&ipt_error_target);
2309 err3:
2310 xt_unregister_target(&ipt_standard_target);
2311 err2:
2312 xt_proto_fini(AF_INET);
2313 err1:
2314 return ret;
/* Module exit: tear down everything ip_tables_init registered,
 * in reverse order. */
2317 static void __exit ip_tables_fini(void)
2319 nf_unregister_sockopt(&ipt_sockopts);
2321 xt_unregister_match(&icmp_matchstruct);
2322 xt_unregister_target(&ipt_error_target);
2323 xt_unregister_target(&ipt_standard_target);
2325 xt_proto_fini(AF_INET);
/* Public API for the per-table modules (iptable_filter et al.). */
2328 EXPORT_SYMBOL(ipt_register_table);
2329 EXPORT_SYMBOL(ipt_unregister_table);
2330 EXPORT_SYMBOL(ipt_do_table);
2331 module_init(ip_tables_init);
2332 module_exit(ip_tables_fini);