2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
47 #define dprintf(format, args...)
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
53 #define duprintf(format, args...)
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
64 #define IP_NF_ASSERT(x)
68 /* All the better to debug you with... */
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
84 ip_packet_match(const struct iphdr
*ip
,
87 const struct ipt_ip
*ipinfo
,
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
95 if (FWINV((ip
->saddr
&ipinfo
->smsk
.s_addr
) != ipinfo
->src
.s_addr
,
97 || FWINV((ip
->daddr
&ipinfo
->dmsk
.s_addr
) != ipinfo
->dst
.s_addr
,
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo
->smsk
.s_addr
),
104 NIPQUAD(ipinfo
->src
.s_addr
),
105 ipinfo
->invflags
& IPT_INV_SRCIP
? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
108 NIPQUAD(ipinfo
->dmsk
.s_addr
),
109 NIPQUAD(ipinfo
->dst
.s_addr
),
110 ipinfo
->invflags
& IPT_INV_DSTIP
? " (INV)" : "");
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
116 ret
|= (((const unsigned long *)indev
)[i
]
117 ^ ((const unsigned long *)ipinfo
->iniface
)[i
])
118 & ((const unsigned long *)ipinfo
->iniface_mask
)[i
];
121 if (FWINV(ret
!= 0, IPT_INV_VIA_IN
)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev
, ipinfo
->iniface
,
124 ipinfo
->invflags
&IPT_INV_VIA_IN
?" (INV)":"");
128 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
129 ret
|= (((const unsigned long *)outdev
)[i
]
130 ^ ((const unsigned long *)ipinfo
->outiface
)[i
])
131 & ((const unsigned long *)ipinfo
->outiface_mask
)[i
];
134 if (FWINV(ret
!= 0, IPT_INV_VIA_OUT
)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev
, ipinfo
->outiface
,
137 ipinfo
->invflags
&IPT_INV_VIA_OUT
?" (INV)":"");
141 /* Check specific protocol */
143 && FWINV(ip
->protocol
!= ipinfo
->proto
, IPT_INV_PROTO
)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip
->protocol
, ipinfo
->proto
,
146 ipinfo
->invflags
&IPT_INV_PROTO
? " (INV)":"");
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo
->flags
&IPT_F_FRAG
) && !isfrag
, IPT_INV_FRAG
)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo
->invflags
& IPT_INV_FRAG
? " (INV)" : "");
162 ip_checkentry(const struct ipt_ip
*ip
)
164 if (ip
->flags
& ~IPT_F_MASK
) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip
->flags
& ~IPT_F_MASK
);
169 if (ip
->invflags
& ~IPT_INV_MASK
) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip
->invflags
& ~IPT_INV_MASK
);
178 ipt_error(struct sk_buff
**pskb
,
179 const struct net_device
*in
,
180 const struct net_device
*out
,
181 unsigned int hooknum
,
182 const struct xt_target
*target
,
183 const void *targinfo
)
186 printk("ip_tables: error: `%s'\n", (char *)targinfo
);
192 int do_match(struct ipt_entry_match
*m
,
193 const struct sk_buff
*skb
,
194 const struct net_device
*in
,
195 const struct net_device
*out
,
199 /* Stop iteration if it doesn't match */
200 if (!m
->u
.kernel
.match
->match(skb
, in
, out
, m
->u
.kernel
.match
, m
->data
,
201 offset
, skb
->nh
.iph
->ihl
*4, hotdrop
))
207 static inline struct ipt_entry
*
208 get_entry(void *base
, unsigned int offset
)
210 return (struct ipt_entry
*)(base
+ offset
);
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
215 ipt_do_table(struct sk_buff
**pskb
,
217 const struct net_device
*in
,
218 const struct net_device
*out
,
219 struct ipt_table
*table
)
221 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict
= NF_DROP
;
228 const char *indev
, *outdev
;
230 struct ipt_entry
*e
, *back
;
231 struct xt_table_info
*private;
234 ip
= (*pskb
)->nh
.iph
;
235 datalen
= (*pskb
)->len
- ip
->ihl
* 4;
236 indev
= in
? in
->name
: nulldevname
;
237 outdev
= out
? out
->name
: nulldevname
;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know, ie. tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
244 offset
= ntohs(ip
->frag_off
) & IP_OFFSET
;
246 read_lock_bh(&table
->lock
);
247 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
248 private = table
->private;
249 table_base
= (void *)private->entries
[smp_processor_id()];
250 e
= get_entry(table_base
, private->hook_entry
[hook
]);
252 /* For return from builtin chain */
253 back
= get_entry(table_base
, private->underflow
[hook
]);
258 if (ip_packet_match(ip
, indev
, outdev
, &e
->ip
, offset
)) {
259 struct ipt_entry_target
*t
;
261 if (IPT_MATCH_ITERATE(e
, do_match
,
263 offset
, &hotdrop
) != 0)
266 ADD_COUNTER(e
->counters
, ntohs(ip
->tot_len
), 1);
268 t
= ipt_get_target(e
);
269 IP_NF_ASSERT(t
->u
.kernel
.target
);
270 /* Standard target? */
271 if (!t
->u
.kernel
.target
->target
) {
274 v
= ((struct ipt_standard_target
*)t
)->verdict
;
276 /* Pop from stack? */
277 if (v
!= IPT_RETURN
) {
278 verdict
= (unsigned)(-v
) - 1;
282 back
= get_entry(table_base
,
286 if (table_base
+ v
!= (void *)e
+ e
->next_offset
287 && !(e
->ip
.flags
& IPT_F_GOTO
)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry
*next
290 = (void *)e
+ e
->next_offset
;
292 = (void *)back
- table_base
;
293 /* set back pointer to next entry */
297 e
= get_entry(table_base
, v
);
299 /* Targets which reenter must return
301 #ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry
*)table_base
)->comefrom
305 verdict
= t
->u
.kernel
.target
->target(pskb
,
311 #ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry
*)table_base
)->comefrom
314 && verdict
== IPT_CONTINUE
) {
315 printk("Target %s reentered!\n",
316 t
->u
.kernel
.target
->name
);
319 ((struct ipt_entry
*)table_base
)->comefrom
322 /* Target might have changed stuff. */
323 ip
= (*pskb
)->nh
.iph
;
324 datalen
= (*pskb
)->len
- ip
->ihl
* 4;
326 if (verdict
== IPT_CONTINUE
)
327 e
= (void *)e
+ e
->next_offset
;
335 e
= (void *)e
+ e
->next_offset
;
339 read_unlock_bh(&table
->lock
);
341 #ifdef DEBUG_ALLOW_ALL
350 /* All zeroes == unconditional rule. */
352 unconditional(const struct ipt_ip
*ip
)
356 for (i
= 0; i
< sizeof(*ip
)/sizeof(__u32
); i
++)
357 if (((__u32
*)ip
)[i
])
363 /* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
366 mark_source_chains(struct xt_table_info
*newinfo
,
367 unsigned int valid_hooks
, void *entry0
)
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
373 for (hook
= 0; hook
< NF_IP_NUMHOOKS
; hook
++) {
374 unsigned int pos
= newinfo
->hook_entry
[hook
];
376 = (struct ipt_entry
*)(entry0
+ pos
);
378 if (!(valid_hooks
& (1 << hook
)))
381 /* Set initial back pointer. */
382 e
->counters
.pcnt
= pos
;
385 struct ipt_standard_target
*t
386 = (void *)ipt_get_target(e
);
388 if (e
->comefrom
& (1 << NF_IP_NUMHOOKS
)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook
, pos
, e
->comefrom
);
394 |= ((1 << hook
) | (1 << NF_IP_NUMHOOKS
));
396 /* Unconditional return/END. */
397 if (e
->target_offset
== sizeof(struct ipt_entry
)
398 && (strcmp(t
->target
.u
.user
.name
,
399 IPT_STANDARD_TARGET
) == 0)
401 && unconditional(&e
->ip
)) {
402 unsigned int oldpos
, size
;
404 /* Return: backtrack through the last
407 e
->comefrom
^= (1<<NF_IP_NUMHOOKS
);
408 #ifdef DEBUG_IP_FIREWALL_USER
410 & (1 << NF_IP_NUMHOOKS
)) {
411 duprintf("Back unset "
418 pos
= e
->counters
.pcnt
;
419 e
->counters
.pcnt
= 0;
421 /* We're at the start. */
425 e
= (struct ipt_entry
*)
427 } while (oldpos
== pos
+ e
->next_offset
);
430 size
= e
->next_offset
;
431 e
= (struct ipt_entry
*)
432 (entry0
+ pos
+ size
);
433 e
->counters
.pcnt
= pos
;
436 int newpos
= t
->verdict
;
438 if (strcmp(t
->target
.u
.user
.name
,
439 IPT_STANDARD_TARGET
) == 0
441 /* This a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
445 /* ... this is a fallthru */
446 newpos
= pos
+ e
->next_offset
;
448 e
= (struct ipt_entry
*)
450 e
->counters
.pcnt
= pos
;
455 duprintf("Finished chain %u\n", hook
);
461 cleanup_match(struct ipt_entry_match
*m
, unsigned int *i
)
463 if (i
&& (*i
)-- == 0)
466 if (m
->u
.kernel
.match
->destroy
)
467 m
->u
.kernel
.match
->destroy(m
->u
.kernel
.match
, m
->data
,
468 m
->u
.match_size
- sizeof(*m
));
469 module_put(m
->u
.kernel
.match
->me
);
474 standard_check(const struct ipt_entry_target
*t
,
475 unsigned int max_offset
)
477 struct ipt_standard_target
*targ
= (void *)t
;
479 /* Check standard info. */
480 if (targ
->verdict
>= 0
481 && targ
->verdict
> max_offset
- sizeof(struct ipt_entry
)) {
482 duprintf("ipt_standard_check: bad verdict (%i)\n",
486 if (targ
->verdict
< -NF_MAX_VERDICT
- 1) {
487 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
495 check_match(struct ipt_entry_match
*m
,
497 const struct ipt_ip
*ip
,
498 unsigned int hookmask
,
501 struct ipt_match
*match
;
504 match
= try_then_request_module(xt_find_match(AF_INET
, m
->u
.user
.name
,
506 "ipt_%s", m
->u
.user
.name
);
507 if (IS_ERR(match
) || !match
) {
508 duprintf("check_match: `%s' not found\n", m
->u
.user
.name
);
509 return match
? PTR_ERR(match
) : -ENOENT
;
511 m
->u
.kernel
.match
= match
;
513 ret
= xt_check_match(match
, AF_INET
, m
->u
.match_size
- sizeof(*m
),
514 name
, hookmask
, ip
->proto
,
515 ip
->invflags
& IPT_INV_PROTO
);
519 if (m
->u
.kernel
.match
->checkentry
520 && !m
->u
.kernel
.match
->checkentry(name
, ip
, match
, m
->data
,
521 m
->u
.match_size
- sizeof(*m
),
523 duprintf("ip_tables: check failed for `%s'.\n",
524 m
->u
.kernel
.match
->name
);
532 module_put(m
->u
.kernel
.match
->me
);
536 static struct ipt_target ipt_standard_target
;
539 check_entry(struct ipt_entry
*e
, const char *name
, unsigned int size
,
542 struct ipt_entry_target
*t
;
543 struct ipt_target
*target
;
547 if (!ip_checkentry(&e
->ip
)) {
548 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
553 ret
= IPT_MATCH_ITERATE(e
, check_match
, name
, &e
->ip
, e
->comefrom
, &j
);
555 goto cleanup_matches
;
557 t
= ipt_get_target(e
);
558 target
= try_then_request_module(xt_find_target(AF_INET
,
561 "ipt_%s", t
->u
.user
.name
);
562 if (IS_ERR(target
) || !target
) {
563 duprintf("check_entry: `%s' not found\n", t
->u
.user
.name
);
564 ret
= target
? PTR_ERR(target
) : -ENOENT
;
565 goto cleanup_matches
;
567 t
->u
.kernel
.target
= target
;
569 ret
= xt_check_target(target
, AF_INET
, t
->u
.target_size
- sizeof(*t
),
570 name
, e
->comefrom
, e
->ip
.proto
,
571 e
->ip
.invflags
& IPT_INV_PROTO
);
575 if (t
->u
.kernel
.target
== &ipt_standard_target
) {
576 if (!standard_check(t
, size
)) {
578 goto cleanup_matches
;
580 } else if (t
->u
.kernel
.target
->checkentry
581 && !t
->u
.kernel
.target
->checkentry(name
, e
, target
, t
->data
,
585 duprintf("ip_tables: check failed for `%s'.\n",
586 t
->u
.kernel
.target
->name
);
594 module_put(t
->u
.kernel
.target
->me
);
596 IPT_MATCH_ITERATE(e
, cleanup_match
, &j
);
601 check_entry_size_and_hooks(struct ipt_entry
*e
,
602 struct xt_table_info
*newinfo
,
604 unsigned char *limit
,
605 const unsigned int *hook_entries
,
606 const unsigned int *underflows
,
611 if ((unsigned long)e
% __alignof__(struct ipt_entry
) != 0
612 || (unsigned char *)e
+ sizeof(struct ipt_entry
) >= limit
) {
613 duprintf("Bad offset %p\n", e
);
618 < sizeof(struct ipt_entry
) + sizeof(struct ipt_entry_target
)) {
619 duprintf("checking: element %p size %u\n",
624 /* Check hooks & underflows */
625 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
626 if ((unsigned char *)e
- base
== hook_entries
[h
])
627 newinfo
->hook_entry
[h
] = hook_entries
[h
];
628 if ((unsigned char *)e
- base
== underflows
[h
])
629 newinfo
->underflow
[h
] = underflows
[h
];
632 /* FIXME: underflows must be unconditional, standard verdicts
633 < 0 (not IPT_RETURN). --RR */
635 /* Clear counters and comefrom */
636 e
->counters
= ((struct xt_counters
) { 0, 0 });
644 cleanup_entry(struct ipt_entry
*e
, unsigned int *i
)
646 struct ipt_entry_target
*t
;
648 if (i
&& (*i
)-- == 0)
651 /* Cleanup all matches */
652 IPT_MATCH_ITERATE(e
, cleanup_match
, NULL
);
653 t
= ipt_get_target(e
);
654 if (t
->u
.kernel
.target
->destroy
)
655 t
->u
.kernel
.target
->destroy(t
->u
.kernel
.target
, t
->data
,
656 t
->u
.target_size
- sizeof(*t
));
657 module_put(t
->u
.kernel
.target
->me
);
661 /* Checks and translates the user-supplied table segment (held in
664 translate_table(const char *name
,
665 unsigned int valid_hooks
,
666 struct xt_table_info
*newinfo
,
670 const unsigned int *hook_entries
,
671 const unsigned int *underflows
)
676 newinfo
->size
= size
;
677 newinfo
->number
= number
;
679 /* Init all hooks to impossible value. */
680 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
681 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
682 newinfo
->underflow
[i
] = 0xFFFFFFFF;
685 duprintf("translate_table: size %u\n", newinfo
->size
);
687 /* Walk through entries, checking offsets. */
688 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
689 check_entry_size_and_hooks
,
693 hook_entries
, underflows
, &i
);
698 duprintf("translate_table: %u not %u entries\n",
703 /* Check hooks all assigned */
704 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
705 /* Only hooks which are valid */
706 if (!(valid_hooks
& (1 << i
)))
708 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
709 duprintf("Invalid hook entry %u %u\n",
713 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
714 duprintf("Invalid underflow %u %u\n",
720 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
723 /* Finally, each sanity check must pass */
725 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
726 check_entry
, name
, size
, &i
);
729 IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
734 /* And one copy for every other CPU */
735 for_each_possible_cpu(i
) {
736 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
737 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
745 add_entry_to_counter(const struct ipt_entry
*e
,
746 struct xt_counters total
[],
749 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
756 set_entry_to_counter(const struct ipt_entry
*e
,
757 struct ipt_counters total
[],
760 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
767 get_counters(const struct xt_table_info
*t
,
768 struct xt_counters counters
[])
774 /* Instead of clearing (by a previous call to memset())
775 * the counters and using adds, we set the counters
776 * with data used by 'current' CPU
777 * We dont care about preemption here.
779 curcpu
= raw_smp_processor_id();
782 IPT_ENTRY_ITERATE(t
->entries
[curcpu
],
784 set_entry_to_counter
,
788 for_each_possible_cpu(cpu
) {
792 IPT_ENTRY_ITERATE(t
->entries
[cpu
],
794 add_entry_to_counter
,
800 static inline struct xt_counters
* alloc_counters(struct ipt_table
*table
)
802 unsigned int countersize
;
803 struct xt_counters
*counters
;
804 struct xt_table_info
*private = table
->private;
806 /* We need atomic snapshot of counters: rest doesn't change
807 (other than comefrom, which userspace doesn't care
809 countersize
= sizeof(struct xt_counters
) * private->number
;
810 counters
= vmalloc_node(countersize
, numa_node_id());
812 if (counters
== NULL
)
813 return ERR_PTR(-ENOMEM
);
815 /* First, sum counters... */
816 write_lock_bh(&table
->lock
);
817 get_counters(private, counters
);
818 write_unlock_bh(&table
->lock
);
824 copy_entries_to_user(unsigned int total_size
,
825 struct ipt_table
*table
,
826 void __user
*userptr
)
828 unsigned int off
, num
;
830 struct xt_counters
*counters
;
831 struct xt_table_info
*private = table
->private;
835 counters
= alloc_counters(table
);
836 if (IS_ERR(counters
))
837 return PTR_ERR(counters
);
839 /* choose the copy that is on our node/cpu, ...
840 * This choice is lazy (because current thread is
841 * allowed to migrate to another cpu)
843 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
844 /* ... then copy entire thing ... */
845 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
850 /* FIXME: use iterator macros --RR */
851 /* ... then go back and fix counters and names */
852 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
854 struct ipt_entry_match
*m
;
855 struct ipt_entry_target
*t
;
857 e
= (struct ipt_entry
*)(loc_cpu_entry
+ off
);
858 if (copy_to_user(userptr
+ off
859 + offsetof(struct ipt_entry
, counters
),
861 sizeof(counters
[num
])) != 0) {
866 for (i
= sizeof(struct ipt_entry
);
867 i
< e
->target_offset
;
868 i
+= m
->u
.match_size
) {
871 if (copy_to_user(userptr
+ off
+ i
872 + offsetof(struct ipt_entry_match
,
874 m
->u
.kernel
.match
->name
,
875 strlen(m
->u
.kernel
.match
->name
)+1)
882 t
= ipt_get_target(e
);
883 if (copy_to_user(userptr
+ off
+ e
->target_offset
884 + offsetof(struct ipt_entry_target
,
886 t
->u
.kernel
.target
->name
,
887 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
899 struct compat_delta
{
900 struct compat_delta
*next
;
905 static struct compat_delta
*compat_offsets
= NULL
;
907 static int compat_add_offset(u_int16_t offset
, short delta
)
909 struct compat_delta
*tmp
;
911 tmp
= kmalloc(sizeof(struct compat_delta
), GFP_KERNEL
);
914 tmp
->offset
= offset
;
916 if (compat_offsets
) {
917 tmp
->next
= compat_offsets
->next
;
918 compat_offsets
->next
= tmp
;
920 compat_offsets
= tmp
;
926 static void compat_flush_offsets(void)
928 struct compat_delta
*tmp
, *next
;
930 if (compat_offsets
) {
931 for(tmp
= compat_offsets
; tmp
; tmp
= next
) {
935 compat_offsets
= NULL
;
939 static short compat_calc_jump(u_int16_t offset
)
941 struct compat_delta
*tmp
;
944 for(tmp
= compat_offsets
, delta
= 0; tmp
; tmp
= tmp
->next
)
945 if (tmp
->offset
< offset
)
950 struct compat_ipt_standard_target
952 struct compat_xt_entry_target target
;
953 compat_int_t verdict
;
956 struct compat_ipt_standard
958 struct compat_ipt_entry entry
;
959 struct compat_ipt_standard_target target
;
962 #define IPT_ST_LEN XT_ALIGN(sizeof(struct ipt_standard_target))
963 #define IPT_ST_COMPAT_LEN COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
964 #define IPT_ST_OFFSET (IPT_ST_LEN - IPT_ST_COMPAT_LEN)
966 static int compat_ipt_standard_fn(void *target
,
967 void **dstptr
, int *size
, int convert
)
969 struct compat_ipt_standard_target compat_st
, *pcompat_st
;
970 struct ipt_standard_target st
, *pst
;
977 memcpy(&compat_st
.target
, &pst
->target
,
978 sizeof(compat_st
.target
));
979 compat_st
.verdict
= pst
->verdict
;
980 if (compat_st
.verdict
> 0)
982 compat_calc_jump(compat_st
.verdict
);
983 compat_st
.target
.u
.user
.target_size
= IPT_ST_COMPAT_LEN
;
984 if (copy_to_user(*dstptr
, &compat_st
, IPT_ST_COMPAT_LEN
))
986 *size
-= IPT_ST_OFFSET
;
987 *dstptr
+= IPT_ST_COMPAT_LEN
;
989 case COMPAT_FROM_USER
:
991 memcpy(&st
.target
, &pcompat_st
->target
, IPT_ST_COMPAT_LEN
);
992 st
.verdict
= pcompat_st
->verdict
;
994 st
.verdict
+= compat_calc_jump(st
.verdict
);
995 st
.target
.u
.user
.target_size
= IPT_ST_LEN
;
996 memcpy(*dstptr
, &st
, IPT_ST_LEN
);
997 *size
+= IPT_ST_OFFSET
;
998 *dstptr
+= IPT_ST_LEN
;
1000 case COMPAT_CALC_SIZE
:
1001 *size
+= IPT_ST_OFFSET
;
1011 compat_calc_match(struct ipt_entry_match
*m
, int * size
)
1013 if (m
->u
.kernel
.match
->compat
)
1014 m
->u
.kernel
.match
->compat(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1016 xt_compat_match(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1020 static int compat_calc_entry(struct ipt_entry
*e
, struct xt_table_info
*info
,
1021 void *base
, struct xt_table_info
*newinfo
)
1023 struct ipt_entry_target
*t
;
1024 u_int16_t entry_offset
;
1028 entry_offset
= (void *)e
- base
;
1029 IPT_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1030 t
= ipt_get_target(e
);
1031 if (t
->u
.kernel
.target
->compat
)
1032 t
->u
.kernel
.target
->compat(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1034 xt_compat_target(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1035 newinfo
->size
-= off
;
1036 ret
= compat_add_offset(entry_offset
, off
);
1040 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1041 if (info
->hook_entry
[i
] && (e
< (struct ipt_entry
*)
1042 (base
+ info
->hook_entry
[i
])))
1043 newinfo
->hook_entry
[i
] -= off
;
1044 if (info
->underflow
[i
] && (e
< (struct ipt_entry
*)
1045 (base
+ info
->underflow
[i
])))
1046 newinfo
->underflow
[i
] -= off
;
1051 static int compat_table_info(struct xt_table_info
*info
,
1052 struct xt_table_info
*newinfo
)
1054 void *loc_cpu_entry
;
1057 if (!newinfo
|| !info
)
1060 memset(newinfo
, 0, sizeof(struct xt_table_info
));
1061 newinfo
->size
= info
->size
;
1062 newinfo
->number
= info
->number
;
1063 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1064 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1065 newinfo
->underflow
[i
] = info
->underflow
[i
];
1067 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1068 return IPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
1069 compat_calc_entry
, info
, loc_cpu_entry
, newinfo
);
1073 static int get_info(void __user
*user
, int *len
, int compat
)
1075 char name
[IPT_TABLE_MAXNAMELEN
];
1076 struct ipt_table
*t
;
1079 if (*len
!= sizeof(struct ipt_getinfo
)) {
1080 duprintf("length %u != %u\n", *len
,
1081 (unsigned int)sizeof(struct ipt_getinfo
));
1085 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1088 name
[IPT_TABLE_MAXNAMELEN
-1] = '\0';
1089 #ifdef CONFIG_COMPAT
1091 xt_compat_lock(AF_INET
);
1093 t
= try_then_request_module(xt_find_table_lock(AF_INET
, name
),
1094 "iptable_%s", name
);
1095 if (t
&& !IS_ERR(t
)) {
1096 struct ipt_getinfo info
;
1097 struct xt_table_info
*private = t
->private;
1099 #ifdef CONFIG_COMPAT
1101 struct xt_table_info tmp
;
1102 ret
= compat_table_info(private, &tmp
);
1103 compat_flush_offsets();
1107 info
.valid_hooks
= t
->valid_hooks
;
1108 memcpy(info
.hook_entry
, private->hook_entry
,
1109 sizeof(info
.hook_entry
));
1110 memcpy(info
.underflow
, private->underflow
,
1111 sizeof(info
.underflow
));
1112 info
.num_entries
= private->number
;
1113 info
.size
= private->size
;
1114 strcpy(info
.name
, name
);
1116 if (copy_to_user(user
, &info
, *len
) != 0)
1124 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1125 #ifdef CONFIG_COMPAT
1127 xt_compat_unlock(AF_INET
);
1133 get_entries(struct ipt_get_entries __user
*uptr
, int *len
)
1136 struct ipt_get_entries get
;
1137 struct ipt_table
*t
;
1139 if (*len
< sizeof(get
)) {
1140 duprintf("get_entries: %u < %d\n", *len
,
1141 (unsigned int)sizeof(get
));
1144 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1146 if (*len
!= sizeof(struct ipt_get_entries
) + get
.size
) {
1147 duprintf("get_entries: %u != %u\n", *len
,
1148 (unsigned int)(sizeof(struct ipt_get_entries
) +
1153 t
= xt_find_table_lock(AF_INET
, get
.name
);
1154 if (t
&& !IS_ERR(t
)) {
1155 struct xt_table_info
*private = t
->private;
1156 duprintf("t->private->number = %u\n",
1158 if (get
.size
== private->size
)
1159 ret
= copy_entries_to_user(private->size
,
1160 t
, uptr
->entrytable
);
1162 duprintf("get_entries: I've got %u not %u!\n",
1170 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1176 __do_replace(const char *name
, unsigned int valid_hooks
,
1177 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1178 void __user
*counters_ptr
)
1181 struct ipt_table
*t
;
1182 struct xt_table_info
*oldinfo
;
1183 struct xt_counters
*counters
;
1184 void *loc_cpu_old_entry
;
1187 counters
= vmalloc(num_counters
* sizeof(struct xt_counters
));
1193 t
= try_then_request_module(xt_find_table_lock(AF_INET
, name
),
1194 "iptable_%s", name
);
1195 if (!t
|| IS_ERR(t
)) {
1196 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1197 goto free_newinfo_counters_untrans
;
1201 if (valid_hooks
!= t
->valid_hooks
) {
1202 duprintf("Valid hook crap: %08X vs %08X\n",
1203 valid_hooks
, t
->valid_hooks
);
1208 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1212 /* Update module usage count based on number of rules */
1213 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1214 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1215 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1216 (newinfo
->number
<= oldinfo
->initial_entries
))
1218 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1219 (newinfo
->number
<= oldinfo
->initial_entries
))
1222 /* Get the old counters. */
1223 get_counters(oldinfo
, counters
);
1224 /* Decrease module usage counts and free resource */
1225 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1226 IPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,NULL
);
1227 xt_free_table_info(oldinfo
);
1228 if (copy_to_user(counters_ptr
, counters
,
1229 sizeof(struct xt_counters
) * num_counters
) != 0)
1238 free_newinfo_counters_untrans
:
1245 do_replace(void __user
*user
, unsigned int len
)
1248 struct ipt_replace tmp
;
1249 struct xt_table_info
*newinfo
;
1250 void *loc_cpu_entry
;
1252 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1255 /* Hack: Causes ipchains to give correct error msg --RR */
1256 if (len
!= sizeof(tmp
) + tmp
.size
)
1257 return -ENOPROTOOPT
;
1259 /* overflow check */
1260 if (tmp
.size
>= (INT_MAX
- sizeof(struct xt_table_info
)) / NR_CPUS
-
1263 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1266 newinfo
= xt_alloc_table_info(tmp
.size
);
1270 /* choose the copy that is our node/cpu */
1271 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1272 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1278 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1279 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1280 tmp
.hook_entry
, tmp
.underflow
);
1284 duprintf("ip_tables: Translated table\n");
1286 ret
= __do_replace(tmp
.name
, tmp
.valid_hooks
,
1287 newinfo
, tmp
.num_counters
,
1290 goto free_newinfo_untrans
;
1293 free_newinfo_untrans
:
1294 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
,NULL
);
1296 xt_free_table_info(newinfo
);
1300 /* We're lazy, and add to the first CPU; overflow works its fey magic
1301 * and everything is OK. */
1303 add_counter_to_entry(struct ipt_entry
*e
,
1304 const struct xt_counters addme
[],
1308 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1310 (long unsigned int)e
->counters
.pcnt
,
1311 (long unsigned int)e
->counters
.bcnt
,
1312 (long unsigned int)addme
[*i
].pcnt
,
1313 (long unsigned int)addme
[*i
].bcnt
);
1316 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1323 do_add_counters(void __user
*user
, unsigned int len
, int compat
)
1326 struct xt_counters_info tmp
;
1327 struct xt_counters
*paddc
;
1328 unsigned int num_counters
;
1332 struct ipt_table
*t
;
1333 struct xt_table_info
*private;
1335 void *loc_cpu_entry
;
1336 #ifdef CONFIG_COMPAT
1337 struct compat_xt_counters_info compat_tmp
;
1341 size
= sizeof(struct compat_xt_counters_info
);
1346 size
= sizeof(struct xt_counters_info
);
1349 if (copy_from_user(ptmp
, user
, size
) != 0)
1352 #ifdef CONFIG_COMPAT
1354 num_counters
= compat_tmp
.num_counters
;
1355 name
= compat_tmp
.name
;
1359 num_counters
= tmp
.num_counters
;
1363 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1366 paddc
= vmalloc_node(len
- size
, numa_node_id());
1370 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1375 t
= xt_find_table_lock(AF_INET
, name
);
1376 if (!t
|| IS_ERR(t
)) {
1377 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1381 write_lock_bh(&t
->lock
);
1382 private = t
->private;
1383 if (private->number
!= num_counters
) {
1385 goto unlock_up_free
;
1389 /* Choose the copy that is on our node */
1390 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1391 IPT_ENTRY_ITERATE(loc_cpu_entry
,
1393 add_counter_to_entry
,
1397 write_unlock_bh(&t
->lock
);
1406 #ifdef CONFIG_COMPAT
1407 struct compat_ipt_replace
{
1408 char name
[IPT_TABLE_MAXNAMELEN
];
1412 u32 hook_entry
[NF_IP_NUMHOOKS
];
1413 u32 underflow
[NF_IP_NUMHOOKS
];
1415 compat_uptr_t counters
; /* struct ipt_counters * */
1416 struct compat_ipt_entry entries
[0];
1419 static inline int compat_copy_match_to_user(struct ipt_entry_match
*m
,
1420 void __user
**dstptr
, compat_uint_t
*size
)
1422 if (m
->u
.kernel
.match
->compat
)
1423 return m
->u
.kernel
.match
->compat(m
, dstptr
, size
,
1426 return xt_compat_match(m
, dstptr
, size
, COMPAT_TO_USER
);
1429 static int compat_copy_entry_to_user(struct ipt_entry
*e
,
1430 void __user
**dstptr
, compat_uint_t
*size
)
1432 struct ipt_entry_target __user
*t
;
1433 struct compat_ipt_entry __user
*ce
;
1434 u_int16_t target_offset
, next_offset
;
1435 compat_uint_t origsize
;
1440 ce
= (struct compat_ipt_entry __user
*)*dstptr
;
1441 if (copy_to_user(ce
, e
, sizeof(struct ipt_entry
)))
1444 *dstptr
+= sizeof(struct compat_ipt_entry
);
1445 ret
= IPT_MATCH_ITERATE(e
, compat_copy_match_to_user
, dstptr
, size
);
1446 target_offset
= e
->target_offset
- (origsize
- *size
);
1449 t
= ipt_get_target(e
);
1450 if (t
->u
.kernel
.target
->compat
)
1451 ret
= t
->u
.kernel
.target
->compat(t
, dstptr
, size
,
1454 ret
= xt_compat_target(t
, dstptr
, size
, COMPAT_TO_USER
);
1458 next_offset
= e
->next_offset
- (origsize
- *size
);
1459 if (put_user(target_offset
, &ce
->target_offset
))
1461 if (put_user(next_offset
, &ce
->next_offset
))
/*
 * Resolve the match named in a user-supplied compat entry.  Looks the
 * match up by name/revision, auto-loading "ipt_<name>" via
 * try_then_request_module() if needed, binds it to m->u.kernel.match,
 * and accumulates the native-vs-compat size delta into *size using
 * COMPAT_CALC_SIZE (via the match's ->compat hook or the generic
 * xt_compat_match()).
 */
1469 compat_check_calc_match(struct ipt_entry_match
*m
,
1471 const struct ipt_ip
*ip
,
1472 unsigned int hookmask
,
1475 struct ipt_match
*match
;
1477 match
= try_then_request_module(xt_find_match(AF_INET
, m
->u
.user
.name
,
1478 m
->u
.user
.revision
),
1479 "ipt_%s", m
->u
.user
.name
);
1480 if (IS_ERR(match
) || !match
) {
1481 duprintf("compat_check_calc_match: `%s' not found\n",
/* propagate the lookup error, or -ENOENT if simply absent */
1483 return match
? PTR_ERR(match
) : -ENOENT
;
1485 m
->u
.kernel
.match
= match
;
/* NULL dstptr: only compute the size adjustment, copy nothing */
1487 if (m
->u
.kernel
.match
->compat
)
1488 m
->u
.kernel
.match
->compat(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1490 xt_compat_match(m
, NULL
, size
, COMPAT_CALC_SIZE
);
/*
 * Validate one compat-layout entry during table translation: check
 * alignment and that the entry fits within [base, limit); sanity-check
 * next_offset and the IP part; resolve all matches (via
 * compat_check_calc_match) and the target, accumulating the size delta
 * in off; record the delta for this entry_offset via
 * compat_add_offset(); and note which hook entry/underflow positions
 * this entry sits at.  NOTE(review): the extraction dropped the error
 * returns and labels between checks — the trailing
 * IPT_MATCH_ITERATE(..., cleanup_match, &j) is the error path that
 * releases the matches grabbed so far.
 */
1497 check_compat_entry_size_and_hooks(struct ipt_entry
*e
,
1498 struct xt_table_info
*newinfo
,
1500 unsigned char *base
,
1501 unsigned char *limit
,
1502 unsigned int *hook_entries
,
1503 unsigned int *underflows
,
1507 struct ipt_entry_target
*t
;
1508 struct ipt_target
*target
;
1509 u_int16_t entry_offset
;
1512 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
/* entry must be properly aligned and leave room before limit */
1513 if ((unsigned long)e
% __alignof__(struct compat_ipt_entry
) != 0
1514 || (unsigned char *)e
+ sizeof(struct compat_ipt_entry
) >= limit
) {
1515 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
/* next_offset must at least cover the entry header plus a target */
1519 if (e
->next_offset
< sizeof(struct compat_ipt_entry
) +
1520 sizeof(struct compat_xt_entry_target
)) {
1521 duprintf("checking: element %p size %u\n",
1526 if (!ip_checkentry(&e
->ip
)) {
1527 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
1532 entry_offset
= (void *)e
- (void *)base
;
/* resolve every match and total up the compat->native size growth */
1534 ret
= IPT_MATCH_ITERATE(e
, compat_check_calc_match
, name
, &e
->ip
,
1535 e
->comefrom
, &off
, &j
);
1539 t
= ipt_get_target(e
);
/* look up the target by name/revision, demand-loading ipt_<name> */
1540 target
= try_then_request_module(xt_find_target(AF_INET
,
1542 t
->u
.user
.revision
),
1543 "ipt_%s", t
->u
.user
.name
);
1544 if (IS_ERR(target
) || !target
) {
1545 duprintf("check_entry: `%s' not found\n", t
->u
.user
.name
);
1546 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1549 t
->u
.kernel
.target
= target
;
1551 if (t
->u
.kernel
.target
->compat
)
1552 t
->u
.kernel
.target
->compat(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1554 xt_compat_target(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
/* remember this entry's size delta for the later copy pass */
1556 ret
= compat_add_offset(entry_offset
, off
);
1560 /* Check hooks & underflows */
1561 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
1562 if ((unsigned char *)e
- base
== hook_entries
[h
])
1563 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1564 if ((unsigned char *)e
- base
== underflows
[h
])
1565 newinfo
->underflow
[h
] = underflows
[h
];
1568 /* Clear counters and comefrom */
1569 e
->counters
= ((struct ipt_counters
) { 0, 0 });
/* error path: drop the module references taken for the first j matches */
1575 IPT_MATCH_ITERATE(e
, cleanup_match
, &j
);
/*
 * Convert one match from the 32-bit layout into the native kernel
 * layout at *dstptr (match ->compat hook if present, else generic
 * xt_compat_match()), then validate the converted match with
 * xt_check_match() and the match's own ->checkentry hook.
 */
1579 static inline int compat_copy_match_from_user(struct ipt_entry_match
*m
,
1580 void **dstptr
, compat_uint_t
*size
, const char *name
,
1581 const struct ipt_ip
*ip
, unsigned int hookmask
)
1583 struct ipt_entry_match
*dm
;
1584 struct ipt_match
*match
;
/* dm: where the converted (native-layout) match will land */
1587 dm
= (struct ipt_entry_match
*)*dstptr
;
1588 match
= m
->u
.kernel
.match
;
1590 match
->compat(m
, dstptr
, size
, COMPAT_FROM_USER
);
1592 xt_compat_match(m
, dstptr
, size
, COMPAT_FROM_USER
);
/* generic sanity check: size, hook mask, protocol constraints */
1594 ret
= xt_check_match(match
, AF_INET
, dm
->u
.match_size
- sizeof(*dm
),
1595 name
, hookmask
, ip
->proto
,
1596 ip
->invflags
& IPT_INV_PROTO
);
/* per-match validation of the converted payload, if provided */
1600 if (m
->u
.kernel
.match
->checkentry
1601 && !m
->u
.kernel
.match
->checkentry(name
, ip
, match
, dm
->data
,
1602 dm
->u
.match_size
- sizeof(*dm
),
1604 duprintf("ip_tables: check failed for `%s'.\n",
1605 m
->u
.kernel
.match
->name
);
/*
 * Translate one entry from the 32-bit layout (at e) into the native
 * layout (at *dstptr / de): copy the header, convert every match and
 * the target, grow target_offset/next_offset by the accumulated size
 * difference (origsize - *size is negative growth here), shift any
 * hook_entry/underflow offsets that lie past this entry, and finally
 * validate the target with xt_check_target() plus either
 * standard_check() (for the built-in standard target) or the target's
 * own ->checkentry hook.
 */
1611 static int compat_copy_entry_from_user(struct ipt_entry
*e
, void **dstptr
,
1612 unsigned int *size
, const char *name
,
1613 struct xt_table_info
*newinfo
, unsigned char *base
)
1615 struct ipt_entry_target
*t
;
1616 struct ipt_target
*target
;
1617 struct ipt_entry
*de
;
1618 unsigned int origsize
;
/* de: native-layout destination for this entry */
1623 de
= (struct ipt_entry
*)*dstptr
;
1624 memcpy(de
, e
, sizeof(struct ipt_entry
));
/* note: source pointer advances by the *compat* entry size */
1626 *dstptr
+= sizeof(struct compat_ipt_entry
);
1627 ret
= IPT_MATCH_ITERATE(e
, compat_copy_match_from_user
, dstptr
, size
,
1628 name
, &de
->ip
, de
->comefrom
);
/* adjust offsets for the layout growth accumulated so far */
1631 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1632 t
= ipt_get_target(e
);
1633 target
= t
->u
.kernel
.target
;
1635 target
->compat(t
, dstptr
, size
, COMPAT_FROM_USER
);
1637 xt_compat_target(t
, dstptr
, size
, COMPAT_FROM_USER
);
1639 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
/* any hook/underflow offset beyond this entry shifts by the growth */
1640 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
1641 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1642 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1643 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1644 newinfo
->underflow
[h
] -= origsize
- *size
;
/* re-fetch the target from the converted (native) entry */
1647 t
= ipt_get_target(de
);
1648 target
= t
->u
.kernel
.target
;
1649 ret
= xt_check_target(target
, AF_INET
, t
->u
.target_size
- sizeof(*t
),
1650 name
, e
->comefrom
, e
->ip
.proto
,
1651 e
->ip
.invflags
& IPT_INV_PROTO
);
/* built-in standard target gets its verdict range checked directly */
1656 if (t
->u
.kernel
.target
== &ipt_standard_target
) {
1657 if (!standard_check(t
, *size
))
1659 } else if (t
->u
.kernel
.target
->checkentry
1660 && !t
->u
.kernel
.target
->checkentry(name
, de
, target
,
1661 t
->data
, t
->u
.target_size
- sizeof(*t
),
1663 duprintf("ip_tables: compat: check failed for `%s'.\n",
1664 t
->u
.kernel
.target
->name
);
/*
 * Translate a whole 32-bit rule blob into a native xt_table_info.
 * Two passes under xt_compat_lock(AF_INET):
 *   1) check_compat_entry_size_and_hooks over the compat entries,
 *      validating each and recording per-entry size deltas;
 *   2) verify every valid hook got an entry/underflow, allocate the
 *      (larger) native table, and compat_copy_entry_from_user each
 *      entry into it.
 * Then mark_source_chains() verifies reachability/loop-freedom and
 * the translated rules are replicated to every other CPU's copy.
 * NOTE(review): error gotos/labels between steps were dropped by the
 * extraction; the tail xt_free_table_info/xt_compat_unlock lines are
 * those error paths.
 */
1673 translate_compat_table(const char *name
,
1674 unsigned int valid_hooks
,
1675 struct xt_table_info
**pinfo
,
1677 unsigned int total_size
,
1678 unsigned int number
,
1679 unsigned int *hook_entries
,
1680 unsigned int *underflows
)
1683 struct xt_table_info
*newinfo
, *info
;
1684 void *pos
, *entry0
, *entry1
;
1691 info
->number
= number
;
1693 /* Init all hooks to impossible value. */
1694 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1695 info
->hook_entry
[i
] = 0xFFFFFFFF;
1696 info
->underflow
[i
] = 0xFFFFFFFF;
1699 duprintf("translate_compat_table: size %u\n", info
->size
);
1701 xt_compat_lock(AF_INET
);
1702 /* Walk through entries, checking offsets. */
1703 ret
= IPT_ENTRY_ITERATE(entry0
, total_size
,
1704 check_compat_entry_size_and_hooks
,
1705 info
, &size
, entry0
,
1706 entry0
+ total_size
,
1707 hook_entries
, underflows
, &i
, name
);
1713 duprintf("translate_compat_table: %u not %u entries\n",
1718 /* Check hooks all assigned */
1719 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1720 /* Only hooks which are valid */
1721 if (!(valid_hooks
& (1 << i
)))
1723 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1724 duprintf("Invalid hook entry %u %u\n",
1725 i
, hook_entries
[i
]);
1728 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1729 duprintf("Invalid underflow %u %u\n",
/* second pass target: table sized for the native layout */
1736 newinfo
= xt_alloc_table_info(size
);
1740 newinfo
->number
= number
;
1741 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1742 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1743 newinfo
->underflow
[i
] = info
->underflow
[i
];
/* translate into this CPU's copy first */
1745 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1748 ret
= IPT_ENTRY_ITERATE(entry0
, total_size
,
1749 compat_copy_entry_from_user
, &pos
, &size
,
1750 name
, newinfo
, entry1
);
/* per-entry offset records are no longer needed */
1751 compat_flush_offsets();
1752 xt_compat_unlock(AF_INET
);
1757 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1760 /* And one copy for every other CPU */
1761 for_each_possible_cpu(i
)
1762 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1763 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1767 xt_free_table_info(info
);
1771 xt_free_table_info(newinfo
);
1775 xt_compat_unlock(AF_INET
);
/*
 * IPT_SO_SET_REPLACE handler for 32-bit callers.  Copies the compat
 * replace header, verifies len matches header+blob exactly, guards
 * the size/num_counters fields against integer overflow, copies the
 * rule blob into a freshly allocated table, translates it with
 * translate_compat_table(), and installs it via __do_replace().  The
 * free_newinfo_untrans label tears down untranslated rules on error.
 */
1780 compat_do_replace(void __user
*user
, unsigned int len
)
1783 struct compat_ipt_replace tmp
;
1784 struct xt_table_info
*newinfo
;
1785 void *loc_cpu_entry
;
1787 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1790 /* Hack: Causes ipchains to give correct error msg --RR */
1791 if (len
!= sizeof(tmp
) + tmp
.size
)
1792 return -ENOPROTOOPT
;
1794 /* overflow check */
1795 if (tmp
.size
>= (INT_MAX
- sizeof(struct xt_table_info
)) / NR_CPUS
-
1798 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1801 newinfo
= xt_alloc_table_info(tmp
.size
);
1805 /* choose the copy that is our node/cpu */
1806 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1807 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1813 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1814 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1815 tmp
.num_entries
, tmp
.hook_entry
, tmp
.underflow
);
1819 duprintf("compat_do_replace: Translated table\n");
/* swap the new table in; old counters land at the compat pointer */
1821 ret
= __do_replace(tmp
.name
, tmp
.valid_hooks
,
1822 newinfo
, tmp
.num_counters
,
1823 compat_ptr(tmp
.counters
));
1825 goto free_newinfo_untrans
;
1828 free_newinfo_untrans
:
/* error path: release match/target modules, then the table itself */
1829 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
,NULL
);
1831 xt_free_table_info(newinfo
);
/*
 * compat setsockopt() dispatcher: requires CAP_NET_ADMIN, then routes
 * IPT_SO_SET_REPLACE to compat_do_replace() and
 * IPT_SO_SET_ADD_COUNTERS to do_add_counters() in compat mode
 * (final argument 1).
 */
1836 compat_do_ipt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1841 if (!capable(CAP_NET_ADMIN
))
1845 case IPT_SO_SET_REPLACE
:
1846 ret
= compat_do_replace(user
, len
);
1849 case IPT_SO_SET_ADD_COUNTERS
:
/* 1 == compat caller: counters are in 32-bit layout */
1850 ret
= do_add_counters(user
, len
, 1);
1854 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd
);
/*
 * 32-bit userland's view of struct ipt_get_entries: table name
 * followed by a flexible array of compat-layout entries.
 * NOTE(review): the size field (original line 1864) was dropped by
 * this extraction — confirm upstream.
 */
1861 struct compat_ipt_get_entries
1863 char name
[IPT_TABLE_MAXNAMELEN
];
1865 struct compat_ipt_entry entrytable
[0];
/*
 * Copy a table's rules out to a 32-bit user buffer in two phases:
 * first convert all entries via compat_copy_entry_to_user(), then
 * walk the *user* buffer again fixing up the per-rule counters and
 * overwriting each match/target kernel name with its canonical
 * ->name string.  Counters are snapshotted with alloc_counters().
 * NOTE(review): error returns after the failed copy_{to,from}_user
 * calls were dropped by the extraction.
 */
1868 static int compat_copy_entries_to_user(unsigned int total_size
,
1869 struct ipt_table
*table
, void __user
*userptr
)
1871 unsigned int off
, num
;
1872 struct compat_ipt_entry e
;
1873 struct xt_counters
*counters
;
1874 struct xt_table_info
*private = table
->private;
1878 void *loc_cpu_entry
;
1880 counters
= alloc_counters(table
);
1881 if (IS_ERR(counters
))
1882 return PTR_ERR(counters
);
1884 /* choose the copy that is on our node/cpu, ...
1885 * This choice is lazy (because current thread is
1886 * allowed to migrate to another cpu)
1888 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1891 ret
= IPT_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1892 compat_copy_entry_to_user
, &pos
, &size
);
1896 /* ... then go back and fix counters and names */
1897 for (off
= 0, num
= 0; off
< size
; off
+= e
.next_offset
, num
++) {
1899 struct ipt_entry_match m
;
1900 struct ipt_entry_target t
;
/* re-read the compat entry header to learn its offsets */
1903 if (copy_from_user(&e
, userptr
+ off
,
1904 sizeof(struct compat_ipt_entry
)))
1906 if (copy_to_user(userptr
+ off
+
1907 offsetof(struct compat_ipt_entry
, counters
),
1908 &counters
[num
], sizeof(counters
[num
])))
/* walk this entry's matches, patching in kernel match names */
1911 for (i
= sizeof(struct compat_ipt_entry
);
1912 i
< e
.target_offset
; i
+= m
.u
.match_size
) {
1913 if (copy_from_user(&m
, userptr
+ off
+ i
,
1914 sizeof(struct ipt_entry_match
)))
1916 if (copy_to_user(userptr
+ off
+ i
+
1917 offsetof(struct ipt_entry_match
, u
.user
.name
),
1918 m
.u
.kernel
.match
->name
,
1919 strlen(m
.u
.kernel
.match
->name
) + 1))
/* same fixup for the entry's target name */
1923 if (copy_from_user(&t
, userptr
+ off
+ e
.target_offset
,
1924 sizeof(struct ipt_entry_target
)))
1926 if (copy_to_user(userptr
+ off
+ e
.target_offset
+
1927 offsetof(struct ipt_entry_target
, u
.user
.name
),
1928 t
.u
.kernel
.target
->name
,
1929 strlen(t
.u
.kernel
.target
->name
) + 1))
/*
 * IPT_SO_GET_ENTRIES handler for 32-bit callers.  Validates the
 * request size against the compat-converted table size
 * (compat_table_info), then dumps the entries with
 * compat_copy_entries_to_user().  The table lookup and copy run
 * under xt_compat_lock(AF_INET); compat_flush_offsets() drops the
 * offset records built during size conversion.
 */
1939 compat_get_entries(struct compat_ipt_get_entries __user
*uptr
, int *len
)
1942 struct compat_ipt_get_entries get
;
1943 struct ipt_table
*t
;
1946 if (*len
< sizeof(get
)) {
1947 duprintf("compat_get_entries: %u < %u\n",
1948 *len
, (unsigned int)sizeof(get
));
1952 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
/* caller must size the buffer exactly: header + get.size of rules */
1955 if (*len
!= sizeof(struct compat_ipt_get_entries
) + get
.size
) {
1956 duprintf("compat_get_entries: %u != %u\n", *len
,
1957 (unsigned int)(sizeof(struct compat_ipt_get_entries
) +
1962 xt_compat_lock(AF_INET
);
1963 t
= xt_find_table_lock(AF_INET
, get
.name
);
1964 if (t
&& !IS_ERR(t
)) {
1965 struct xt_table_info
*private = t
->private;
1966 struct xt_table_info info
;
1967 duprintf("t->private->number = %u\n",
/* compute what the table looks like in compat layout */
1969 ret
= compat_table_info(private, &info
);
1970 if (!ret
&& get
.size
== info
.size
) {
1971 ret
= compat_copy_entries_to_user(private->size
,
1972 t
, uptr
->entrytable
);
1974 duprintf("compat_get_entries: I've got %u not %u!\n",
1979 compat_flush_offsets();
1983 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1985 xt_compat_unlock(AF_INET
);
/*
 * compat getsockopt() dispatcher: IPT_SO_GET_INFO via get_info() in
 * compat mode (final argument 1), IPT_SO_GET_ENTRIES via
 * compat_get_entries().
 * NOTE(review): no capable(CAP_NET_ADMIN) check is visible in this
 * extraction, unlike the native do_ipt_get_ctl() below — confirm the
 * check wasn't dropped.
 */
1990 compat_do_ipt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1995 case IPT_SO_GET_INFO
:
1996 ret
= get_info(user
, len
, 1);
1998 case IPT_SO_GET_ENTRIES
:
1999 ret
= compat_get_entries(user
, len
);
2002 duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd
);
/*
 * Native setsockopt() dispatcher: requires CAP_NET_ADMIN, then routes
 * IPT_SO_SET_REPLACE to do_replace() and IPT_SO_SET_ADD_COUNTERS to
 * do_add_counters() in native mode (final argument 0).
 */
2010 do_ipt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2014 if (!capable(CAP_NET_ADMIN
))
2018 case IPT_SO_SET_REPLACE
:
2019 ret
= do_replace(user
, len
);
2022 case IPT_SO_SET_ADD_COUNTERS
:
/* 0 == native caller: counters are in native layout */
2023 ret
= do_add_counters(user
, len
, 0);
2027 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd
);
/*
 * Native getsockopt() dispatcher: requires CAP_NET_ADMIN.  Handles
 * IPT_SO_GET_INFO, IPT_SO_GET_ENTRIES, and the revision queries
 * (IPT_SO_GET_REVISION_MATCH/TARGET), which look up the best
 * available revision of a named match or target, demand-loading the
 * "ipt_<name>" module if necessary.
 */
2035 do_ipt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2039 if (!capable(CAP_NET_ADMIN
))
2043 case IPT_SO_GET_INFO
:
2044 ret
= get_info(user
, len
, 0);
2047 case IPT_SO_GET_ENTRIES
:
2048 ret
= get_entries(user
, len
);
2051 case IPT_SO_GET_REVISION_MATCH
:
2052 case IPT_SO_GET_REVISION_TARGET
: {
2053 struct ipt_get_revision rev
;
/* revision request must be exactly sizeof(rev) */
2056 if (*len
!= sizeof(rev
)) {
2060 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2065 if (cmd
== IPT_SO_GET_REVISION_TARGET
)
2070 try_then_request_module(xt_find_revision(AF_INET
, rev
.name
,
2073 "ipt_%s", rev
.name
);
2078 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd
);
/*
 * Register an iptables table: allocate an xt_table_info sized for the
 * initial ruleset, copy the bootstrap rules from repl into this CPU's
 * copy, translate/validate them, and register the table with the
 * x_tables core.  newinfo is freed on any failure path.
 */
2085 int ipt_register_table(struct xt_table
*table
, const struct ipt_replace
*repl
)
2088 struct xt_table_info
*newinfo
;
/* empty placeholder; xt_register_table swaps in the real newinfo */
2089 static struct xt_table_info bootstrap
2090 = { 0, 0, 0, { 0 }, { 0 }, { } };
2091 void *loc_cpu_entry
;
2093 newinfo
= xt_alloc_table_info(repl
->size
);
2097 /* choose the copy on our node/cpu
2098 * but dont care of preemption
2100 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2101 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2103 ret
= translate_table(table
->name
, table
->valid_hooks
,
2104 newinfo
, loc_cpu_entry
, repl
->size
,
2109 xt_free_table_info(newinfo
);
2113 ret
= xt_register_table(table
, &bootstrap
, newinfo
);
2115 xt_free_table_info(newinfo
);
/*
 * Unregister a table: detach it from the x_tables core, run
 * cleanup_entry over every rule (dropping match/target module
 * references), and free the per-CPU table memory.
 */
2122 void ipt_unregister_table(struct ipt_table
*table
)
2124 struct xt_table_info
*private;
2125 void *loc_cpu_entry
;
2127 private = xt_unregister_table(table
);
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2131 IPT_ENTRY_ITERATE(loc_cpu_entry
, private->size
, cleanup_entry
, NULL
);
2132 xt_free_table_info(private);
2135 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/*
 * test_type == 0xFF is the wildcard (matches any ICMP type); otherwise
 * the packet's type must equal test_type and its code fall within
 * [min_code, max_code].
 * NOTE(review): the extraction truncated the return expression — the
 * result is presumably XORed with an invert flag; confirm upstream.
 */
2137 icmp_type_code_match(u_int8_t test_type
, u_int8_t min_code
, u_int8_t max_code
,
2138 u_int8_t type
, u_int8_t code
,
2141 return ((test_type
== 0xFF) || (type
== test_type
&& code
>= min_code
&& code
<= max_code
))
/*
 * Match function for the built-in "icmp" match: reads the ICMP header
 * at protoff via skb_header_pointer() (drops the packet if the header
 * is truncated — "evil ICMP tinygram") and compares type/code against
 * the configured range, honoring the IPT_ICMP_INV invert flag.
 */
2146 icmp_match(const struct sk_buff
*skb
,
2147 const struct net_device
*in
,
2148 const struct net_device
*out
,
2149 const struct xt_match
*match
,
2150 const void *matchinfo
,
2152 unsigned int protoff
,
2155 struct icmphdr _icmph
, *ic
;
2156 const struct ipt_icmp
*icmpinfo
= matchinfo
;
2158 /* Must not be a fragment. */
/* ic == NULL means the header could not be pulled from the skb */
2162 ic
= skb_header_pointer(skb
, protoff
, sizeof(_icmph
), &_icmph
);
2164 /* We've been asked to examine this packet, and we
2165 * can't. Hence, no choice but to drop.
2167 duprintf("Dropping evil ICMP tinygram.\n");
2172 return icmp_type_code_match(icmpinfo
->type
,
/* !! normalizes the invert flag to 0/1 */
2176 !!(icmpinfo
->invflags
&IPT_ICMP_INV
));
2179 /* Called when user tries to insert an entry of this type. */
/*
 * Validate user-supplied icmp match data: reject any invert flag
 * other than IPT_ICMP_INV.  Returns nonzero (accept) / 0 (reject)
 * per the checkentry convention of this era.
 */
2181 icmp_checkentry(const char *tablename
,
2183 const struct xt_match
*match
,
2185 unsigned int matchsize
,
2186 unsigned int hook_mask
)
2188 const struct ipt_icmp
*icmpinfo
= matchinfo
;
2190 /* Must specify no unknown invflags */
2191 return !(icmpinfo
->invflags
& ~IPT_ICMP_INV
);
2194 /* The built-in targets: standard (NULL) and error. */
/*
 * The "standard" target: its data is a plain int verdict
 * (ACCEPT/DROP/jump offset); no target function — the traverser
 * interprets the verdict directly.  compat hook handles the
 * 32-bit verdict conversion.
 */
2195 static struct ipt_target ipt_standard_target
= {
2196 .name
= IPT_STANDARD_TARGET
,
2197 .targetsize
= sizeof(int),
2199 #ifdef CONFIG_COMPAT
2200 .compat
= &compat_ipt_standard_fn
,
/*
 * The "error" target: carries an error name (chain-name-sized data)
 * and its target function (ipt_error) fires only if a rule with it
 * is ever actually hit, which indicates table corruption.
 */
2204 static struct ipt_target ipt_error_target
= {
2205 .name
= IPT_ERROR_TARGET
,
2206 .target
= ipt_error
,
2207 .targetsize
= IPT_FUNCTION_MAXNAMELEN
,
/*
 * Socket-option registration: wires the IPT_SO_SET_*/IPT_SO_GET_*
 * ranges to the native handlers, plus the compat handlers when
 * CONFIG_COMPAT is enabled.
 */
2211 static struct nf_sockopt_ops ipt_sockopts
= {
2213 .set_optmin
= IPT_BASE_CTL
,
2214 .set_optmax
= IPT_SO_SET_MAX
+1,
2215 .set
= do_ipt_set_ctl
,
2216 #ifdef CONFIG_COMPAT
2217 .compat_set
= compat_do_ipt_set_ctl
,
2219 .get_optmin
= IPT_BASE_CTL
,
2220 .get_optmax
= IPT_SO_GET_MAX
+1,
2221 .get
= do_ipt_get_ctl
,
2222 #ifdef CONFIG_COMPAT
2223 .compat_get
= compat_do_ipt_get_ctl
,
/*
 * The built-in "icmp" match, registered for IPPROTO_ICMP so "-p icmp
 * --icmp-type ..." works without a separate module.
 */
2227 static struct ipt_match icmp_matchstruct
= {
2229 .match
= icmp_match
,
2230 .matchsize
= sizeof(struct ipt_icmp
),
2231 .proto
= IPPROTO_ICMP
,
2233 .checkentry
= icmp_checkentry
,
/*
 * Module init: register with the x_tables core for AF_INET, then the
 * built-in standard/error targets, the icmp match, and finally the
 * sockopt interface.  The trailing unregister calls are the error
 * unwind path (extraction dropped the goto labels between steps).
 */
2236 static int __init
ip_tables_init(void)
2240 ret
= xt_proto_init(AF_INET
);
2244 /* Noone else will be downing sem now, so we won't sleep */
2245 ret
= xt_register_target(&ipt_standard_target
);
2248 ret
= xt_register_target(&ipt_error_target
);
2251 ret
= xt_register_match(&icmp_matchstruct
);
2255 /* Register setsockopt */
2256 ret
= nf_register_sockopt(&ipt_sockopts
);
2260 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
/* error unwind: undo registrations in reverse order */
2264 xt_unregister_match(&icmp_matchstruct
);
2266 xt_unregister_target(&ipt_error_target
);
2268 xt_unregister_target(&ipt_standard_target
);
2270 xt_proto_fini(AF_INET
);
/*
 * Module exit: tear everything down in reverse order of
 * ip_tables_init() registration.
 */
2275 static void __exit
ip_tables_fini(void)
2277 nf_unregister_sockopt(&ipt_sockopts
);
2279 xt_unregister_match(&icmp_matchstruct
);
2280 xt_unregister_target(&ipt_error_target
);
2281 xt_unregister_target(&ipt_standard_target
);
2283 xt_proto_fini(AF_INET
);
/* Public API for per-table modules (iptable_filter, iptable_nat, ...) */
2286 EXPORT_SYMBOL(ipt_register_table
);
2287 EXPORT_SYMBOL(ipt_unregister_table
);
2288 EXPORT_SYMBOL(ipt_do_table
);
2289 module_init(ip_tables_init
);
2290 module_exit(ip_tables_fini
);