[NETFILTER]: ip_tables: propagate netns from userspace
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

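/* Editorial note on the FWINV() helper defined inside ip_packet_match()
 * below: every address/interface/protocol test can be inverted by an
 * IPT_INV_* flag, and FWINV(cond, flag) evaluates to cond XOR "flag set".
 * E.g. with IPT_INV_SRCIP set, a *failed* source-address comparison makes
 * the test succeed -- the "! -s" semantics of iptables.
 */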
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m,
	 const struct sk_buff *skb,
	 const struct net_device *in,
	 const struct net_device *out,
	 int offset,
	 bool *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, ip_hdrlen(skb), hotdrop))
		return true;
	else
		return false;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
#undef FWINV

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

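/* Standard verdicts arrive from userspace as negative numbers: a verdict
 * v < 0 encodes -NF_<verdict> - 1, so e.g. -NF_ACCEPT - 1 == -2, which
 * ipt_do_table() decodes below via (unsigned)(-v) - 1.  IPT_RETURN is
 * special-cased and pops the 'back' pointer instead.  Non-negative
 * verdicts are byte offsets of the next rule (jumps and gotos).
 */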
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      skb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
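/* The walk below borrows per-entry storage instead of allocating a stack:
 * counters.pcnt temporarily holds the position we came from (restored to
 * 0 on the way back), and bit NF_INET_NUMHOOKS of comefrom marks entries
 * on the current path so a revisit is reported as a loop.
 */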
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			     && (strcmp(t->target.u.user.name,
					IPT_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}

static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct ipt_entry_match *m, const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask, unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}

static int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target, t->data,
					       e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}

static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}

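/* Table translation happens in stages: walk the blob once to validate
 * entry sizes and record hook entry points, verify every valid hook got
 * both an entry point and an underflow, run mark_source_chains() to
 * reject loops, run the per-entry match/target checks, and finally
 * duplicate the checked blob into every other CPU's copy.
 */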
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

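/* Counters are kept per CPU; a snapshot SETs from the current CPU's copy
 * first and then ADDs every other CPU's copy on top, which is why both
 * iterator callbacks above exist.
 */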
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}

static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

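/* CONFIG_COMPAT: 32-bit userspace on a 64-bit kernel lays entries out
 * with smaller structs, so each entry's size delta is recorded via
 * xt_compat_add_offset() and standard verdicts that are really jump
 * offsets get adjusted with xt_compat_calc_jump() when copied in either
 * direction.
 */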
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif

static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

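/* Replacement swaps the new table in under the xt table lock via
 * xt_replace_table(), then harvests the old table's counters and frees
 * it; the module_put() pair below drops references so the table module
 * stays pinned only while rules beyond the initial built-ins exist.
 */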
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  compat_uint_t *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}

static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	int j, ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

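/* Compat translation is two passes: first walk the 32-bit blob under
 * xt_compat_lock() to size everything and collect offsets, then expand
 * each entry into a freshly allocated native table and re-run the usual
 * checks (mark_source_chains, compat_check_entry) on the result.
 */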
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sk->sk_net, user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sk->sk_net, user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sk->sk_net, user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sk->sk_net, user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sk->sk_net, user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sk->sk_net, user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sk->sk_net, user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sk->sk_net, user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

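/* A table module registers itself by passing a template xt_table and an
 * initial ipt_replace blob.  A minimal sketch, assuming the caller has a
 * bootstrap ruleset laid out like iptable_filter's initial table (the
 * names my_table and my_initial_repl below are hypothetical):
 *
 *	struct xt_table *t;
 *
 *	t = ipt_register_table(net, &my_table, &my_initial_repl);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 */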
struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}

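/* For ICMP, test type 0xFF acts as a wildcard ("any type"); otherwise
 * the type must match and the code must fall in [min_code, max_code],
 * with the whole result invertible via IPT_ICMP_INV.  E.g. type 8
 * (echo-request) with code range 0-0 matches ping requests only.
 */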
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   bool *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return false;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static bool
icmp_checkentry(const char *tablename,
		const void *entry,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

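/* The standard target deliberately has no .target hook: ipt_do_table()
 * detects the NULL pointer and interprets the stored verdict inline
 * instead of calling out, which keeps jumps and returns cheap.
 */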
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);