/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */
extern unsigned int
ipt_cone_target(struct sk_buff *skb, unsigned int hooknum,
		const struct net_device *in, const struct net_device *out,
		const struct xt_target *target, const void *targinfo);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether the packet matches the rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
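/*
 * Illustrative note (not in the original source): FWINV() XORs a raw
 * comparison result with the corresponding IPT_INV_* inversion bit, so
 * one expression covers both the plain and the "!"-negated form of a
 * rule.  For example, with a rule like "-s ! 10.0.0.0/8":
 *
 *   FWINV((ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
 *         IPT_INV_SRCIP)
 *
 * evaluates to 1 ("mismatch") exactly when the source address *is*
 * inside 10.0.0.0/8, because the inversion bit flips the result.
 */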
	if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
		return 1;
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}
	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(struct ipt_ip *ip)
{
#define FWINV(bool, invflg) ((bool) || (ip->invflags & (invflg)))
	if (FWINV(ip->smsk.s_addr, IPT_INV_SRCIP) ||
	    FWINV(ip->dmsk.s_addr, IPT_INV_DSTIP))
		goto has_match_rules;

	if (FWINV(!!((const unsigned long *)ip->iniface_mask)[0], IPT_INV_VIA_IN) ||
	    FWINV(!!((const unsigned long *)ip->outiface_mask)[0], IPT_INV_VIA_OUT))
		goto has_match_rules;

	if (FWINV(ip->proto, IPT_INV_PROTO))
		goto has_match_rules;

	if (FWINV(ip->flags & IPT_F_FRAG, IPT_INV_FRAG))
		goto has_match_rules;

	ip->flags |= IPT_F_NO_DEF_MATCH;

has_match_rules:
	if (ip->flags & ~(IPT_F_MASK | IPT_F_NO_DEF_MATCH)) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}

	return 1;
}
static unsigned int
ipt_error(struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, ip_hdrlen(skb), hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
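/*
 * Illustrative note (not part of the original file): entries live in one
 * flat blob and are addressed by byte offsets, so table traversal is just
 * pointer arithmetic.  For instance, the first rule of a hook is
 *
 *   e = get_entry(table_base, private->hook_entry[hook]);
 *
 * and the rule following e is (void *)e + e->next_offset, which is
 * exactly how ipt_do_table() below walks a chain.
 */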
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);
	if (e->target_offset <= sizeof(struct ipt_entry) &&
	    (e->ip.flags & IPT_F_NO_DEF_MATCH)) {
		struct ipt_entry_target *t = ipt_get_target(e);
		if (!t->u.kernel.target->target) {
			int v = ((struct ipt_standard_target *)t)->verdict;
			if ((v < 0) && (v != IPT_RETURN)) {
				ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
				xt_info_rdunlock_bh();
				return (unsigned)(-v) - 1;
			}
		}
	}
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it either. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
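	/*
	 * Illustrative note (not in the original source): IP_OFFSET masks
	 * out the fragment-offset field of frag_off, so `offset' is 0 for
	 * an unfragmented packet or the first fragment and non-zero for
	 * every later fragment.  Protocol matches (tcp/udp ports, icmp
	 * type, ...) are handed this value and refuse to match when it is
	 * non-zero, which is what the comment above describes.
	 */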
	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
		skb->nfcache |= e->nfcache;
#endif
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      skb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				v = ((struct ipt_standard_target *)t)->verdict;

				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;

					if (ipt_cone_target(skb, hook, in, out,
							    t->data) == NF_ACCEPT) {
						/* Accept cone target as default */
					back = get_entry(table_base,
							 back->comefrom);

				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else if (verdict == IPT_RETURN) { // added -- zzz
					e = back;
					back = get_entry(table_base, back->comefrom);
				}
 no_match:
			e = (void *)e + e->next_offset;

	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
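/*
 * Illustrative note (not in the original source): an "unconditional"
 * rule is one whose struct ipt_ip is entirely zero -- no address, mask,
 * interface, protocol or flag restriction -- i.e. what you get from
 * "iptables -A chain -j TARGET" with no match options.  The loop above
 * simply scans the structure word by word looking for any non-zero bit.
 */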
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;
		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;
				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}
				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
			}
		}
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
			      const struct ipt_ip *ip, unsigned int hookmask,
			      unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j = 0;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}
	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	/* Walk through entries, checking offsets. */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo, entry0, entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}
	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}
	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);
	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}
	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i = 0;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters, &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters, &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
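/*
 * Illustrative note (not in the original source): because every CPU has
 * its own copy of the ruleset, each rule's packet/byte counters are also
 * per CPU.  A userspace snapshot is therefore built by SET-ting the
 * totals from the current CPU's copy and then ADD-ing every other CPU's
 * copy on top, rule by rule, which is what get_counters() above does.
 */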
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for(tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
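/*
 * Illustrative note (not in the original source): for each entry whose
 * native kernel layout is larger than its 32-bit compat layout,
 * compat_add_offset() records the entry's offset together with the size
 * difference.  compat_calc_jump(offset) then returns the total of all
 * differences accumulated before `offset', so a compat jump target is
 * translated to native by adding that value (compat_standard_from_user
 * below) and translated back by subtracting it (compat_standard_to_user).
 */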
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry, newinfo);
}
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc, &i);
	xt_info_wrunlock(curcpu);

 unlock_up_free:
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
					    void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
static int compat_copy_entry_to_user(struct ipt_entry *e,
				     void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static inline int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static inline int
compat_release_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int off, h, ret;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static inline int compat_check_entry(struct ipt_entry *e, const char *name,
				     unsigned int *i)
{
	int ret;
	unsigned int j = 0;

	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
					   compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return ret;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
					struct xt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match, u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target, u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}

	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision, target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but dont care of preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
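/*
 * Illustrative note (not in the original source): userspace encodes
 * "icmp any" as type 0xFF with the full code range, which is why a
 * test_type of 0xFF short-circuits the comparison above.  For example,
 * "-p icmp --icmp-type echo-request" becomes test_type = 8,
 * min_code = 0, max_code = 0xFF, so only type-8 packets match
 * (flipped when the "!" flag sets `invert').
 */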
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const void *info,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};

static struct xt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);