/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
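
/* ip_packet_match() below compares the IP header against the IP part
 * of a rule.  The FWINV() helper XORs each individual test with the
 * corresponding IPT_INV_* flag, so a single expression covers both the
 * plain and the '!'-inverted form of every option.
 */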
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
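
/* Bound to the ERROR target, which terminates each table and marks the
 * head of every user-defined chain.  Reaching it at packet-processing
 * time means the ruleset is corrupt, so complain (ratelimited) and
 * drop.
 */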
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
#undef FWINV
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		   && strcmp(t->target.u.kernel.target->name,
			     IPT_STANDARD_TARGET) == 0
		   && t->verdict < 0
		   && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
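
/* A note on the verdict encoding used during traversal: a table is one
 * flat blob of variable-sized ipt_entry structures.  A standard target
 * carries an int verdict v: v >= 0 is a jump offset into the blob,
 * v == IPT_RETURN pops the saved 'back' pointer, and any other v < 0
 * encodes the builtin verdict -v - 1 (NF_ACCEPT, NF_DROP, ...).
 * Non-standard targets return their verdict from the target hook
 * itself.
 */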
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
				tgpar.target   = t->u.kernel.target;
				tgpar.targinfo = t->data;
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
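
/* mark_source_chains() reuses two otherwise idle fields as scratch
 * space: e->counters.pcnt holds the saved back pointer while a chain
 * is being walked (reset to 0 on the way out), and e->comefrom
 * collects the hook bitmask, with bit NF_INET_NUMHOOKS acting as the
 * "currently on the walk path" marker that makes loops detectable.
 */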
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}

static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
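
/* translate_table() vets the user-supplied blob in several passes
 * before it can be published: per-entry size/offset checks, a check
 * that every valid hook got both an entry point and an underflow,
 * loop detection via mark_source_chains(), and finally the match and
 * target checkentry callbacks.  Only a fully checked table is copied
 * out to the remaining CPUs.
 */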
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}

static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
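
/* CONFIG_COMPAT: the code below lets 32-bit iptables binaries manage
 * tables on a 64-bit kernel.  struct ipt_entry and the match/target
 * headers differ in size between the two ABIs, so entries have to be
 * re-sized in both directions, with the per-entry size deltas recorded
 * via xt_compat_add_offset() so that xt_compat_calc_jump() can later
 * fix up jump verdicts.
 */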
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
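/* Mirror of struct ipt_replace with u32/compat_uptr_t sized fields to
 * match what 32-bit userspace hands us; entries[] carries the compat
 * entry image that translate_compat_table() expands.
 */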
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
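
/* Compat counterpart of translate_table(): one pass validates each
 * compat entry and accumulates how much larger its native form will
 * be, a second pass expands everything into a freshly allocated
 * native table, and only then do the usual loop and entry checks run
 * on the result.
 */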
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
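
/* Typical use from a table module (a sketch modelled on iptable_filter;
 * the packet_filter/repl names are illustrative):
 *
 *	net->ipv4.iptable_filter =
 *		ipt_register_table(net, &packet_filter, repl);
 *	if (IS_ERR(net->ipv4.iptable_filter))
 *		return PTR_ERR(net->ipv4.iptable_filter);
 *
 * with ipt_unregister_table() called from the pernet exit path.
 */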
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
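
/* A test_type of 0xFF below acts as a wildcard and matches every ICMP
 * type and code (0xFF is what iptables' "any" type appears to map to).
 */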
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);