netfilter: xtables: ignore unassigned hooks in check_entry_size_and_hooks
net/ipv4/netfilter/ip_tables.c (linux-2.6.git)
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
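/*
 * FWINV worked example (editor's illustrative sketch, not part of the
 * original source): the macro XORs a match failure with the presence of
 * the corresponding inversion flag, so a single expression serves both
 * "-s addr" and "-s ! addr" rules.
 */
#if 0
	/* hypothetical locals for a "-s ! 10.0.0.0/8" rule */
	bool mismatch = (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr;
	/* without IPT_INV_SRCIP: the rule fails exactly when mismatch is true  */
	/* with    IPT_INV_SRCIP: the rule fails exactly when mismatch is false */
	bool fail = mismatch ^ !!(ipinfo->invflags & IPT_INV_SRCIP);
#endif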
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
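/*
 * Note: a struct ipt_ip of all zeroes carries no addresses, masks,
 * interface names, protocol or flags, so it matches every packet.
 * unconditional() is how mark_source_chains() and the trace code below
 * recognise chain policies and unconditional RETURNs by sight.
 */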
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		struct ipt_entry_target *t;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff) ||
		    IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
			e = ipt_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e)
			    && !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
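/*
 * Standard verdicts are stored negated and offset by one, so the decode
 * in ipt_do_table() is just the inverse mapping.  Worked example:
 * userspace encodes "-j ACCEPT" as v = -NF_ACCEPT - 1 = -2, and the
 * kernel recovers verdict = (unsigned)(-(-2)) - 1 = 1 = NF_ACCEPT.
 * IPT_RETURN is the one negative encoding that pops the back-pointer
 * "stack" instead of terminating traversal.
 */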
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
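/*
 * The traversal above borrows two fields it later restores: comefrom
 * accumulates the source-hook bitmask (with bit NF_INET_NUMHOOKS doubling
 * as an "on the current path" marker for loop detection), and
 * counters.pcnt temporarily holds the position we jumped from, forming a
 * poor man's call stack that is zeroed again while backtracking.
 */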
static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}
static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		/* par is a pointer, so par->match, not par.match */
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
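/*
 * This is the behaviour named in the patch subject: hooks that are not
 * set in valid_hooks are skipped outright, so whatever userspace left in
 * hook_entries[] or underflows[] for an unused hook can never be matched
 * against an entry offset and recorded.  translate_table() still verifies
 * afterwards that every *valid* hook got both values assigned.
 */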
static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, valid_hooks, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
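/*
 * Pass order matters here: offsets and hook entry points must be known
 * to be sane before mark_source_chains() walks the jump graph, and only
 * then are the per-entry match/target checkentry hooks run, so a failed
 * check can be unwound with cleanup_entry() over the first i entries.
 */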
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
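/*
 * Why jumps need translating at all: a native struct ipt_entry (and its
 * matches/targets) is larger than the 32-bit compat layout, so the same
 * rule sits at different byte offsets in the two representations.
 * xt_compat_calc_jump() looks up the accumulated size delta recorded via
 * xt_compat_add_offset() for the jump's destination.  Sketch with
 * hypothetical numbers: if the entries preceding a destination at compat
 * offset 1000 grow by 120 bytes in native form, a stored verdict of 1000
 * becomes 1120 coming from userspace, and 1120 shrinks back to 1000 on
 * the way out.
 */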
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
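/*
 * Mirrors struct ipt_replace field for field; only the userspace pointer
 * is carried as compat_uptr_t, which is why compat_do_replace() below has
 * to widen it with compat_ptr() before handing it to __do_replace().
 */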
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
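/*
 * Unlike the native check_entry_size_and_hooks() above, this compat
 * variant is not passed valid_hooks and compares every hook's offsets;
 * translate_compat_table() relies on its own "hooks all assigned" pass
 * below to reject tables whose valid hooks were never matched.
 */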
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
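/*
 * Example: userspace encodes a catch-all ICMP type as 0xFF, which the
 * wildcard test in icmp_type_code_match() accepts regardless of the
 * packet's type and code, while a specific match such as echo-request is
 * a concrete type with a min_code..max_code window of 0..0xFF.
 */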
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);