/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
29 #include <net/netfilter/nf_log.h>
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
33 MODULE_DESCRIPTION("IPv4 packet filter");
35 /*#define DEBUG_IP_FIREWALL*/
36 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
37 /*#define DEBUG_IP_FIREWALL_USER*/
39 #ifdef DEBUG_IP_FIREWALL
40 #define dprintf(format, args...) printk(format , ## args)
42 #define dprintf(format, args...)
45 #ifdef DEBUG_IP_FIREWALL_USER
46 #define duprintf(format, args...) printk(format , ## args)
48 #define duprintf(format, args...)
51 #ifdef CONFIG_NETFILTER_DEBUG
52 #define IP_NF_ASSERT(x) \
55 printk("IP_NF_ASSERT: %s:%s:%u\n", \
56 __func__, __FILE__, __LINE__); \
59 #define IP_NF_ASSERT(x)
63 /* All the better to debug you with... */
69 We keep a set of rules for each CPU, so we can avoid write-locking
70 them in the softirq when updating the counters and therefore
71 only need to read-lock in the softirq; doing a write_lock_bh() in user
72 context stops packets coming through and allows user context to read
73 the counters or update the rules.
75 Hence the start of any table is given by get_table() below. */
77 /* Returns whether matches rule or not. */
78 /* Performance critical - called for every packet */
80 ip_packet_match(const struct iphdr
*ip
,
83 const struct ipt_ip
*ipinfo
,
89 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
91 if (FWINV((ip
->saddr
&ipinfo
->smsk
.s_addr
) != ipinfo
->src
.s_addr
,
93 || FWINV((ip
->daddr
&ipinfo
->dmsk
.s_addr
) != ipinfo
->dst
.s_addr
,
95 dprintf("Source or dest mismatch.\n");
97 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
98 &ip
->saddr
, &ipinfo
->smsk
.s_addr
, &ipinfo
->src
.s_addr
,
99 ipinfo
->invflags
& IPT_INV_SRCIP
? " (INV)" : "");
100 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
101 &ip
->daddr
, &ipinfo
->dmsk
.s_addr
, &ipinfo
->dst
.s_addr
,
102 ipinfo
->invflags
& IPT_INV_DSTIP
? " (INV)" : "");
106 /* Look for ifname matches; this should unroll nicely. */
107 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
108 ret
|= (((const unsigned long *)indev
)[i
]
109 ^ ((const unsigned long *)ipinfo
->iniface
)[i
])
110 & ((const unsigned long *)ipinfo
->iniface_mask
)[i
];
113 if (FWINV(ret
!= 0, IPT_INV_VIA_IN
)) {
114 dprintf("VIA in mismatch (%s vs %s).%s\n",
115 indev
, ipinfo
->iniface
,
116 ipinfo
->invflags
&IPT_INV_VIA_IN
?" (INV)":"");
120 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
121 ret
|= (((const unsigned long *)outdev
)[i
]
122 ^ ((const unsigned long *)ipinfo
->outiface
)[i
])
123 & ((const unsigned long *)ipinfo
->outiface_mask
)[i
];
126 if (FWINV(ret
!= 0, IPT_INV_VIA_OUT
)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n",
128 outdev
, ipinfo
->outiface
,
129 ipinfo
->invflags
&IPT_INV_VIA_OUT
?" (INV)":"");
133 /* Check specific protocol */
135 && FWINV(ip
->protocol
!= ipinfo
->proto
, IPT_INV_PROTO
)) {
136 dprintf("Packet protocol %hi does not match %hi.%s\n",
137 ip
->protocol
, ipinfo
->proto
,
138 ipinfo
->invflags
&IPT_INV_PROTO
? " (INV)":"");
142 /* If we have a fragment rule but the packet is not a fragment
143 * then we return zero */
144 if (FWINV((ipinfo
->flags
&IPT_F_FRAG
) && !isfrag
, IPT_INV_FRAG
)) {
145 dprintf("Fragment rule but not fragment.%s\n",
146 ipinfo
->invflags
& IPT_INV_FRAG
? " (INV)" : "");
154 ip_checkentry(const struct ipt_ip
*ip
)
156 if (ip
->flags
& ~IPT_F_MASK
) {
157 duprintf("Unknown flag bits set: %08X\n",
158 ip
->flags
& ~IPT_F_MASK
);
161 if (ip
->invflags
& ~IPT_INV_MASK
) {
162 duprintf("Unknown invflag bits set: %08X\n",
163 ip
->invflags
& ~IPT_INV_MASK
);
170 ipt_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
173 printk("ip_tables: error: `%s'\n",
174 (const char *)par
->targinfo
);
179 /* Performance critical - called for every packet */
181 do_match(struct ipt_entry_match
*m
, const struct sk_buff
*skb
,
182 struct xt_match_param
*par
)
184 par
->match
= m
->u
.kernel
.match
;
185 par
->matchinfo
= m
->data
;
187 /* Stop iteration if it doesn't match */
188 if (!m
->u
.kernel
.match
->match(skb
, par
))
194 /* Performance critical */
/* Return the rule entry at byte offset 'offset' from the table base.
 * (void-pointer arithmetic is a GCC extension, standard in kernel code.) */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
201 /* All zeroes == unconditional rule. */
202 /* Mildly perf critical (only if packet tracing is on) */
204 unconditional(const struct ipt_ip
*ip
)
208 for (i
= 0; i
< sizeof(*ip
)/sizeof(__u32
); i
++)
209 if (((__u32
*)ip
)[i
])
216 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
217 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
218 static const char *const hooknames
[] = {
219 [NF_INET_PRE_ROUTING
] = "PREROUTING",
220 [NF_INET_LOCAL_IN
] = "INPUT",
221 [NF_INET_FORWARD
] = "FORWARD",
222 [NF_INET_LOCAL_OUT
] = "OUTPUT",
223 [NF_INET_POST_ROUTING
] = "POSTROUTING",
/* Indexes into the comments[] table below. */
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};
232 static const char *const comments
[] = {
233 [NF_IP_TRACE_COMMENT_RULE
] = "rule",
234 [NF_IP_TRACE_COMMENT_RETURN
] = "return",
235 [NF_IP_TRACE_COMMENT_POLICY
] = "policy",
238 static struct nf_loginfo trace_loginfo
= {
239 .type
= NF_LOG_TYPE_LOG
,
243 .logflags
= NF_LOG_MASK
,
248 /* Mildly perf critical (only if packet tracing is on) */
250 get_chainname_rulenum(struct ipt_entry
*s
, struct ipt_entry
*e
,
251 char *hookname
, char **chainname
,
252 char **comment
, unsigned int *rulenum
)
254 struct ipt_standard_target
*t
= (void *)ipt_get_target(s
);
256 if (strcmp(t
->target
.u
.kernel
.target
->name
, IPT_ERROR_TARGET
) == 0) {
257 /* Head of user chain: ERROR target with chainname */
258 *chainname
= t
->target
.data
;
263 if (s
->target_offset
== sizeof(struct ipt_entry
)
264 && strcmp(t
->target
.u
.kernel
.target
->name
,
265 IPT_STANDARD_TARGET
) == 0
267 && unconditional(&s
->ip
)) {
268 /* Tail of chains: STANDARD target (return/policy) */
269 *comment
= *chainname
== hookname
270 ? (char *)comments
[NF_IP_TRACE_COMMENT_POLICY
]
271 : (char *)comments
[NF_IP_TRACE_COMMENT_RETURN
];
280 static void trace_packet(struct sk_buff
*skb
,
282 const struct net_device
*in
,
283 const struct net_device
*out
,
284 const char *tablename
,
285 struct xt_table_info
*private,
289 const struct ipt_entry
*root
;
290 char *hookname
, *chainname
, *comment
;
291 unsigned int rulenum
= 0;
293 table_base
= (void *)private->entries
[smp_processor_id()];
294 root
= get_entry(table_base
, private->hook_entry
[hook
]);
296 hookname
= chainname
= (char *)hooknames
[hook
];
297 comment
= (char *)comments
[NF_IP_TRACE_COMMENT_RULE
];
299 IPT_ENTRY_ITERATE(root
,
300 private->size
- private->hook_entry
[hook
],
301 get_chainname_rulenum
,
302 e
, hookname
, &chainname
, &comment
, &rulenum
);
304 nf_log_packet(AF_INET
, hook
, skb
, in
, out
, &trace_loginfo
,
305 "TRACE: %s:%s:%s:%u ",
306 tablename
, chainname
, comment
, rulenum
);
310 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
312 ipt_do_table(struct sk_buff
*skb
,
314 const struct net_device
*in
,
315 const struct net_device
*out
,
316 struct xt_table
*table
)
318 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
319 const struct iphdr
*ip
;
321 bool hotdrop
= false;
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict
= NF_DROP
;
324 const char *indev
, *outdev
;
326 struct ipt_entry
*e
, *back
;
327 struct xt_table_info
*private;
328 struct xt_match_param mtpar
;
329 struct xt_target_param tgpar
;
333 datalen
= skb
->len
- ip
->ihl
* 4;
334 indev
= in
? in
->name
: nulldevname
;
335 outdev
= out
? out
->name
: nulldevname
;
336 /* We handle fragments by dealing with the first fragment as
337 * if it was a normal packet. All other fragments are treated
338 * normally, except that they will NEVER match rules that ask
339 * things we don't know, ie. tcp syn flag or ports). If the
340 * rule is also a fragment-specific rule, non-fragments won't
342 mtpar
.fragoff
= ntohs(ip
->frag_off
) & IP_OFFSET
;
343 mtpar
.thoff
= ip_hdrlen(skb
);
344 mtpar
.hotdrop
= &hotdrop
;
345 mtpar
.in
= tgpar
.in
= in
;
346 mtpar
.out
= tgpar
.out
= out
;
347 mtpar
.family
= tgpar
.family
= NFPROTO_IPV4
;
348 tgpar
.hooknum
= hook
;
350 read_lock_bh(&table
->lock
);
351 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
352 private = table
->private;
353 table_base
= (void *)private->entries
[raw_smp_processor_id()];
354 e
= get_entry(table_base
, private->hook_entry
[hook
]);
356 /* For return from builtin chain */
357 back
= get_entry(table_base
, private->underflow
[hook
]);
362 if (ip_packet_match(ip
, indev
, outdev
,
363 &e
->ip
, mtpar
.fragoff
)) {
364 struct ipt_entry_target
*t
;
366 if (IPT_MATCH_ITERATE(e
, do_match
, skb
, &mtpar
) != 0)
369 ADD_COUNTER(e
->counters
, ntohs(ip
->tot_len
), 1);
371 t
= ipt_get_target(e
);
372 IP_NF_ASSERT(t
->u
.kernel
.target
);
374 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
375 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
376 /* The packet is traced: log it */
377 if (unlikely(skb
->nf_trace
))
378 trace_packet(skb
, hook
, in
, out
,
379 table
->name
, private, e
);
381 /* Standard target? */
382 if (!t
->u
.kernel
.target
->target
) {
385 v
= ((struct ipt_standard_target
*)t
)->verdict
;
387 /* Pop from stack? */
388 if (v
!= IPT_RETURN
) {
389 verdict
= (unsigned)(-v
) - 1;
393 back
= get_entry(table_base
,
397 if (table_base
+ v
!= (void *)e
+ e
->next_offset
398 && !(e
->ip
.flags
& IPT_F_GOTO
)) {
399 /* Save old back ptr in next entry */
400 struct ipt_entry
*next
401 = (void *)e
+ e
->next_offset
;
403 = (void *)back
- table_base
;
404 /* set back pointer to next entry */
408 e
= get_entry(table_base
, v
);
410 /* Targets which reenter must return
412 tgpar
.target
= t
->u
.kernel
.target
;
413 tgpar
.targinfo
= t
->data
;
414 #ifdef CONFIG_NETFILTER_DEBUG
415 ((struct ipt_entry
*)table_base
)->comefrom
418 verdict
= t
->u
.kernel
.target
->target(skb
,
420 #ifdef CONFIG_NETFILTER_DEBUG
421 if (((struct ipt_entry
*)table_base
)->comefrom
423 && verdict
== IPT_CONTINUE
) {
424 printk("Target %s reentered!\n",
425 t
->u
.kernel
.target
->name
);
428 ((struct ipt_entry
*)table_base
)->comefrom
431 /* Target might have changed stuff. */
433 datalen
= skb
->len
- ip
->ihl
* 4;
435 if (verdict
== IPT_CONTINUE
)
436 e
= (void *)e
+ e
->next_offset
;
444 e
= (void *)e
+ e
->next_offset
;
448 read_unlock_bh(&table
->lock
);
450 #ifdef DEBUG_ALLOW_ALL
459 /* Figures out from what hook each rule can be called: returns 0 if
460 there are loops. Puts hook bitmask in comefrom. */
462 mark_source_chains(struct xt_table_info
*newinfo
,
463 unsigned int valid_hooks
, void *entry0
)
467 /* No recursion; use packet counter to save back ptrs (reset
468 to 0 as we leave), and comefrom to save source hook bitmask */
469 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
470 unsigned int pos
= newinfo
->hook_entry
[hook
];
471 struct ipt_entry
*e
= (struct ipt_entry
*)(entry0
+ pos
);
473 if (!(valid_hooks
& (1 << hook
)))
476 /* Set initial back pointer. */
477 e
->counters
.pcnt
= pos
;
480 struct ipt_standard_target
*t
481 = (void *)ipt_get_target(e
);
482 int visited
= e
->comefrom
& (1 << hook
);
484 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook
, pos
, e
->comefrom
);
489 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
491 /* Unconditional return/END. */
492 if ((e
->target_offset
== sizeof(struct ipt_entry
)
493 && (strcmp(t
->target
.u
.user
.name
,
494 IPT_STANDARD_TARGET
) == 0)
496 && unconditional(&e
->ip
)) || visited
) {
497 unsigned int oldpos
, size
;
499 if ((strcmp(t
->target
.u
.user
.name
,
500 IPT_STANDARD_TARGET
) == 0) &&
501 t
->verdict
< -NF_MAX_VERDICT
- 1) {
502 duprintf("mark_source_chains: bad "
503 "negative verdict (%i)\n",
508 /* Return: backtrack through the last
511 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
512 #ifdef DEBUG_IP_FIREWALL_USER
514 & (1 << NF_INET_NUMHOOKS
)) {
515 duprintf("Back unset "
522 pos
= e
->counters
.pcnt
;
523 e
->counters
.pcnt
= 0;
525 /* We're at the start. */
529 e
= (struct ipt_entry
*)
531 } while (oldpos
== pos
+ e
->next_offset
);
534 size
= e
->next_offset
;
535 e
= (struct ipt_entry
*)
536 (entry0
+ pos
+ size
);
537 e
->counters
.pcnt
= pos
;
540 int newpos
= t
->verdict
;
542 if (strcmp(t
->target
.u
.user
.name
,
543 IPT_STANDARD_TARGET
) == 0
545 if (newpos
> newinfo
->size
-
546 sizeof(struct ipt_entry
)) {
547 duprintf("mark_source_chains: "
548 "bad verdict (%i)\n",
552 /* This a jump; chase it. */
553 duprintf("Jump rule %u -> %u\n",
556 /* ... this is a fallthru */
557 newpos
= pos
+ e
->next_offset
;
559 e
= (struct ipt_entry
*)
561 e
->counters
.pcnt
= pos
;
566 duprintf("Finished chain %u\n", hook
);
572 cleanup_match(struct ipt_entry_match
*m
, unsigned int *i
)
574 struct xt_mtdtor_param par
;
576 if (i
&& (*i
)-- == 0)
579 par
.match
= m
->u
.kernel
.match
;
580 par
.matchinfo
= m
->data
;
581 par
.family
= NFPROTO_IPV4
;
582 if (par
.match
->destroy
!= NULL
)
583 par
.match
->destroy(&par
);
584 module_put(par
.match
->me
);
589 check_entry(struct ipt_entry
*e
, const char *name
)
591 struct ipt_entry_target
*t
;
593 if (!ip_checkentry(&e
->ip
)) {
594 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
598 if (e
->target_offset
+ sizeof(struct ipt_entry_target
) >
602 t
= ipt_get_target(e
);
603 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
610 check_match(struct ipt_entry_match
*m
, struct xt_mtchk_param
*par
,
613 const struct ipt_ip
*ip
= par
->entryinfo
;
616 par
->match
= m
->u
.kernel
.match
;
617 par
->matchinfo
= m
->data
;
619 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
620 ip
->proto
, ip
->invflags
& IPT_INV_PROTO
);
622 duprintf("ip_tables: check failed for `%s'.\n",
631 find_check_match(struct ipt_entry_match
*m
, struct xt_mtchk_param
*par
,
634 struct xt_match
*match
;
637 match
= try_then_request_module(xt_find_match(AF_INET
, m
->u
.user
.name
,
639 "ipt_%s", m
->u
.user
.name
);
640 if (IS_ERR(match
) || !match
) {
641 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
642 return match
? PTR_ERR(match
) : -ENOENT
;
644 m
->u
.kernel
.match
= match
;
646 ret
= check_match(m
, par
, i
);
652 module_put(m
->u
.kernel
.match
->me
);
656 static int check_target(struct ipt_entry
*e
, const char *name
)
658 struct ipt_entry_target
*t
= ipt_get_target(e
);
659 struct xt_tgchk_param par
= {
662 .target
= t
->u
.kernel
.target
,
664 .hook_mask
= e
->comefrom
,
665 .family
= NFPROTO_IPV4
,
669 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
670 e
->ip
.proto
, e
->ip
.invflags
& IPT_INV_PROTO
);
672 duprintf("ip_tables: check failed for `%s'.\n",
673 t
->u
.kernel
.target
->name
);
680 find_check_entry(struct ipt_entry
*e
, const char *name
, unsigned int size
,
683 struct ipt_entry_target
*t
;
684 struct xt_target
*target
;
687 struct xt_mtchk_param mtpar
;
689 ret
= check_entry(e
, name
);
695 mtpar
.entryinfo
= &e
->ip
;
696 mtpar
.hook_mask
= e
->comefrom
;
697 mtpar
.family
= NFPROTO_IPV4
;
698 ret
= IPT_MATCH_ITERATE(e
, find_check_match
, &mtpar
, &j
);
700 goto cleanup_matches
;
702 t
= ipt_get_target(e
);
703 target
= try_then_request_module(xt_find_target(AF_INET
,
706 "ipt_%s", t
->u
.user
.name
);
707 if (IS_ERR(target
) || !target
) {
708 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
709 ret
= target
? PTR_ERR(target
) : -ENOENT
;
710 goto cleanup_matches
;
712 t
->u
.kernel
.target
= target
;
714 ret
= check_target(e
, name
);
721 module_put(t
->u
.kernel
.target
->me
);
723 IPT_MATCH_ITERATE(e
, cleanup_match
, &j
);
728 check_entry_size_and_hooks(struct ipt_entry
*e
,
729 struct xt_table_info
*newinfo
,
731 unsigned char *limit
,
732 const unsigned int *hook_entries
,
733 const unsigned int *underflows
,
738 if ((unsigned long)e
% __alignof__(struct ipt_entry
) != 0
739 || (unsigned char *)e
+ sizeof(struct ipt_entry
) >= limit
) {
740 duprintf("Bad offset %p\n", e
);
745 < sizeof(struct ipt_entry
) + sizeof(struct ipt_entry_target
)) {
746 duprintf("checking: element %p size %u\n",
751 /* Check hooks & underflows */
752 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
753 if ((unsigned char *)e
- base
== hook_entries
[h
])
754 newinfo
->hook_entry
[h
] = hook_entries
[h
];
755 if ((unsigned char *)e
- base
== underflows
[h
])
756 newinfo
->underflow
[h
] = underflows
[h
];
759 /* FIXME: underflows must be unconditional, standard verdicts
760 < 0 (not IPT_RETURN). --RR */
762 /* Clear counters and comefrom */
763 e
->counters
= ((struct xt_counters
) { 0, 0 });
771 cleanup_entry(struct ipt_entry
*e
, unsigned int *i
)
773 struct xt_tgdtor_param par
;
774 struct ipt_entry_target
*t
;
776 if (i
&& (*i
)-- == 0)
779 /* Cleanup all matches */
780 IPT_MATCH_ITERATE(e
, cleanup_match
, NULL
);
781 t
= ipt_get_target(e
);
783 par
.target
= t
->u
.kernel
.target
;
784 par
.targinfo
= t
->data
;
785 par
.family
= NFPROTO_IPV4
;
786 if (par
.target
->destroy
!= NULL
)
787 par
.target
->destroy(&par
);
788 module_put(par
.target
->me
);
792 /* Checks and translates the user-supplied table segment (held in
795 translate_table(const char *name
,
796 unsigned int valid_hooks
,
797 struct xt_table_info
*newinfo
,
801 const unsigned int *hook_entries
,
802 const unsigned int *underflows
)
807 newinfo
->size
= size
;
808 newinfo
->number
= number
;
810 /* Init all hooks to impossible value. */
811 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
812 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
813 newinfo
->underflow
[i
] = 0xFFFFFFFF;
816 duprintf("translate_table: size %u\n", newinfo
->size
);
818 /* Walk through entries, checking offsets. */
819 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
820 check_entry_size_and_hooks
,
824 hook_entries
, underflows
, &i
);
829 duprintf("translate_table: %u not %u entries\n",
834 /* Check hooks all assigned */
835 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
836 /* Only hooks which are valid */
837 if (!(valid_hooks
& (1 << i
)))
839 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
840 duprintf("Invalid hook entry %u %u\n",
844 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
845 duprintf("Invalid underflow %u %u\n",
851 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
854 /* Finally, each sanity check must pass */
856 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
857 find_check_entry
, name
, size
, &i
);
860 IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
865 /* And one copy for every other CPU */
866 for_each_possible_cpu(i
) {
867 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
868 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
876 add_entry_to_counter(const struct ipt_entry
*e
,
877 struct xt_counters total
[],
880 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
887 set_entry_to_counter(const struct ipt_entry
*e
,
888 struct ipt_counters total
[],
891 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
898 get_counters(const struct xt_table_info
*t
,
899 struct xt_counters counters
[])
905 /* Instead of clearing (by a previous call to memset())
906 * the counters and using adds, we set the counters
907 * with data used by 'current' CPU
908 * We dont care about preemption here.
910 curcpu
= raw_smp_processor_id();
913 IPT_ENTRY_ITERATE(t
->entries
[curcpu
],
915 set_entry_to_counter
,
919 for_each_possible_cpu(cpu
) {
923 IPT_ENTRY_ITERATE(t
->entries
[cpu
],
925 add_entry_to_counter
,
931 static struct xt_counters
* alloc_counters(struct xt_table
*table
)
933 unsigned int countersize
;
934 struct xt_counters
*counters
;
935 const struct xt_table_info
*private = table
->private;
937 /* We need atomic snapshot of counters: rest doesn't change
938 (other than comefrom, which userspace doesn't care
940 countersize
= sizeof(struct xt_counters
) * private->number
;
941 counters
= vmalloc_node(countersize
, numa_node_id());
943 if (counters
== NULL
)
944 return ERR_PTR(-ENOMEM
);
946 /* First, sum counters... */
947 write_lock_bh(&table
->lock
);
948 get_counters(private, counters
);
949 write_unlock_bh(&table
->lock
);
955 copy_entries_to_user(unsigned int total_size
,
956 struct xt_table
*table
,
957 void __user
*userptr
)
959 unsigned int off
, num
;
961 struct xt_counters
*counters
;
962 const struct xt_table_info
*private = table
->private;
964 const void *loc_cpu_entry
;
966 counters
= alloc_counters(table
);
967 if (IS_ERR(counters
))
968 return PTR_ERR(counters
);
970 /* choose the copy that is on our node/cpu, ...
971 * This choice is lazy (because current thread is
972 * allowed to migrate to another cpu)
974 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
975 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
980 /* FIXME: use iterator macros --RR */
981 /* ... then go back and fix counters and names */
982 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
984 const struct ipt_entry_match
*m
;
985 const struct ipt_entry_target
*t
;
987 e
= (struct ipt_entry
*)(loc_cpu_entry
+ off
);
988 if (copy_to_user(userptr
+ off
989 + offsetof(struct ipt_entry
, counters
),
991 sizeof(counters
[num
])) != 0) {
996 for (i
= sizeof(struct ipt_entry
);
997 i
< e
->target_offset
;
998 i
+= m
->u
.match_size
) {
1001 if (copy_to_user(userptr
+ off
+ i
1002 + offsetof(struct ipt_entry_match
,
1004 m
->u
.kernel
.match
->name
,
1005 strlen(m
->u
.kernel
.match
->name
)+1)
1012 t
= ipt_get_target(e
);
1013 if (copy_to_user(userptr
+ off
+ e
->target_offset
1014 + offsetof(struct ipt_entry_target
,
1016 t
->u
.kernel
.target
->name
,
1017 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1028 #ifdef CONFIG_COMPAT
1029 static void compat_standard_from_user(void *dst
, void *src
)
1031 int v
= *(compat_int_t
*)src
;
1034 v
+= xt_compat_calc_jump(AF_INET
, v
);
1035 memcpy(dst
, &v
, sizeof(v
));
1038 static int compat_standard_to_user(void __user
*dst
, void *src
)
1040 compat_int_t cv
= *(int *)src
;
1043 cv
-= xt_compat_calc_jump(AF_INET
, cv
);
1044 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1048 compat_calc_match(struct ipt_entry_match
*m
, int *size
)
1050 *size
+= xt_compat_match_offset(m
->u
.kernel
.match
);
1054 static int compat_calc_entry(struct ipt_entry
*e
,
1055 const struct xt_table_info
*info
,
1056 void *base
, struct xt_table_info
*newinfo
)
1058 struct ipt_entry_target
*t
;
1059 unsigned int entry_offset
;
1062 off
= sizeof(struct ipt_entry
) - sizeof(struct compat_ipt_entry
);
1063 entry_offset
= (void *)e
- base
;
1064 IPT_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1065 t
= ipt_get_target(e
);
1066 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1067 newinfo
->size
-= off
;
1068 ret
= xt_compat_add_offset(AF_INET
, entry_offset
, off
);
1072 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1073 if (info
->hook_entry
[i
] &&
1074 (e
< (struct ipt_entry
*)(base
+ info
->hook_entry
[i
])))
1075 newinfo
->hook_entry
[i
] -= off
;
1076 if (info
->underflow
[i
] &&
1077 (e
< (struct ipt_entry
*)(base
+ info
->underflow
[i
])))
1078 newinfo
->underflow
[i
] -= off
;
1083 static int compat_table_info(const struct xt_table_info
*info
,
1084 struct xt_table_info
*newinfo
)
1086 void *loc_cpu_entry
;
1088 if (!newinfo
|| !info
)
1091 /* we dont care about newinfo->entries[] */
1092 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1093 newinfo
->initial_entries
= 0;
1094 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1095 return IPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
1096 compat_calc_entry
, info
, loc_cpu_entry
,
1101 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
1103 char name
[IPT_TABLE_MAXNAMELEN
];
1107 if (*len
!= sizeof(struct ipt_getinfo
)) {
1108 duprintf("length %u != %zu\n", *len
,
1109 sizeof(struct ipt_getinfo
));
1113 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1116 name
[IPT_TABLE_MAXNAMELEN
-1] = '\0';
1117 #ifdef CONFIG_COMPAT
1119 xt_compat_lock(AF_INET
);
1121 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET
, name
),
1122 "iptable_%s", name
);
1123 if (t
&& !IS_ERR(t
)) {
1124 struct ipt_getinfo info
;
1125 const struct xt_table_info
*private = t
->private;
1127 #ifdef CONFIG_COMPAT
1129 struct xt_table_info tmp
;
1130 ret
= compat_table_info(private, &tmp
);
1131 xt_compat_flush_offsets(AF_INET
);
1135 info
.valid_hooks
= t
->valid_hooks
;
1136 memcpy(info
.hook_entry
, private->hook_entry
,
1137 sizeof(info
.hook_entry
));
1138 memcpy(info
.underflow
, private->underflow
,
1139 sizeof(info
.underflow
));
1140 info
.num_entries
= private->number
;
1141 info
.size
= private->size
;
1142 strcpy(info
.name
, name
);
1144 if (copy_to_user(user
, &info
, *len
) != 0)
1152 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1153 #ifdef CONFIG_COMPAT
1155 xt_compat_unlock(AF_INET
);
1161 get_entries(struct net
*net
, struct ipt_get_entries __user
*uptr
, int *len
)
1164 struct ipt_get_entries get
;
1167 if (*len
< sizeof(get
)) {
1168 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1171 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1173 if (*len
!= sizeof(struct ipt_get_entries
) + get
.size
) {
1174 duprintf("get_entries: %u != %zu\n",
1175 *len
, sizeof(get
) + get
.size
);
1179 t
= xt_find_table_lock(net
, AF_INET
, get
.name
);
1180 if (t
&& !IS_ERR(t
)) {
1181 const struct xt_table_info
*private = t
->private;
1182 duprintf("t->private->number = %u\n", private->number
);
1183 if (get
.size
== private->size
)
1184 ret
= copy_entries_to_user(private->size
,
1185 t
, uptr
->entrytable
);
1187 duprintf("get_entries: I've got %u not %u!\n",
1188 private->size
, get
.size
);
1194 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1200 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1201 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1202 void __user
*counters_ptr
)
1206 struct xt_table_info
*oldinfo
;
1207 struct xt_counters
*counters
;
1208 void *loc_cpu_old_entry
;
1211 counters
= vmalloc(num_counters
* sizeof(struct xt_counters
));
1217 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET
, name
),
1218 "iptable_%s", name
);
1219 if (!t
|| IS_ERR(t
)) {
1220 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1221 goto free_newinfo_counters_untrans
;
1225 if (valid_hooks
!= t
->valid_hooks
) {
1226 duprintf("Valid hook crap: %08X vs %08X\n",
1227 valid_hooks
, t
->valid_hooks
);
1232 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1236 /* Update module usage count based on number of rules */
1237 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1238 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1239 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1240 (newinfo
->number
<= oldinfo
->initial_entries
))
1242 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1243 (newinfo
->number
<= oldinfo
->initial_entries
))
1246 /* Get the old counters. */
1247 get_counters(oldinfo
, counters
);
1248 /* Decrease module usage counts and free resource */
1249 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1250 IPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1252 xt_free_table_info(oldinfo
);
1253 if (copy_to_user(counters_ptr
, counters
,
1254 sizeof(struct xt_counters
) * num_counters
) != 0)
1263 free_newinfo_counters_untrans
:
1270 do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1273 struct ipt_replace tmp
;
1274 struct xt_table_info
*newinfo
;
1275 void *loc_cpu_entry
;
1277 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1280 /* overflow check */
1281 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1284 newinfo
= xt_alloc_table_info(tmp
.size
);
1288 /* choose the copy that is on our node/cpu */
1289 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1290 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1296 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1297 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1298 tmp
.hook_entry
, tmp
.underflow
);
1302 duprintf("ip_tables: Translated table\n");
1304 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1305 tmp
.num_counters
, tmp
.counters
);
1307 goto free_newinfo_untrans
;
1310 free_newinfo_untrans
:
1311 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1313 xt_free_table_info(newinfo
);
1317 /* We're lazy, and add to the first CPU; overflow works its fey magic
1318 * and everything is OK. */
1320 add_counter_to_entry(struct ipt_entry
*e
,
1321 const struct xt_counters addme
[],
1325 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1327 (long unsigned int)e
->counters
.pcnt
,
1328 (long unsigned int)e
->counters
.bcnt
,
1329 (long unsigned int)addme
[*i
].pcnt
,
1330 (long unsigned int)addme
[*i
].bcnt
);
1333 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1340 do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
, int compat
)
1343 struct xt_counters_info tmp
;
1344 struct xt_counters
*paddc
;
1345 unsigned int num_counters
;
1350 const struct xt_table_info
*private;
1352 void *loc_cpu_entry
;
1353 #ifdef CONFIG_COMPAT
1354 struct compat_xt_counters_info compat_tmp
;
1358 size
= sizeof(struct compat_xt_counters_info
);
1363 size
= sizeof(struct xt_counters_info
);
1366 if (copy_from_user(ptmp
, user
, size
) != 0)
1369 #ifdef CONFIG_COMPAT
1371 num_counters
= compat_tmp
.num_counters
;
1372 name
= compat_tmp
.name
;
1376 num_counters
= tmp
.num_counters
;
1380 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1383 paddc
= vmalloc_node(len
- size
, numa_node_id());
1387 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1392 t
= xt_find_table_lock(net
, AF_INET
, name
);
1393 if (!t
|| IS_ERR(t
)) {
1394 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1398 write_lock_bh(&t
->lock
);
1399 private = t
->private;
1400 if (private->number
!= num_counters
) {
1402 goto unlock_up_free
;
1406 /* Choose the copy that is on our node */
1407 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1408 IPT_ENTRY_ITERATE(loc_cpu_entry
,
1410 add_counter_to_entry
,
1414 write_unlock_bh(&t
->lock
);
1423 #ifdef CONFIG_COMPAT
1424 struct compat_ipt_replace
{
1425 char name
[IPT_TABLE_MAXNAMELEN
];
1429 u32 hook_entry
[NF_INET_NUMHOOKS
];
1430 u32 underflow
[NF_INET_NUMHOOKS
];
1432 compat_uptr_t counters
; /* struct ipt_counters * */
1433 struct compat_ipt_entry entries
[0];
/*
 * Copy one kernel-format rule out to a 32-bit userland buffer,
 * shrinking it to the compat layout on the way.  *dstptr/*size track
 * the moving output cursor; counters[*i] supplies the per-rule
 * counter snapshot.  Returns 0 or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	/* Header copied in native layout first; offsets patched below. */
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	/* Account for the native-vs-compat header size difference. */
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	/* Offsets shrink by however much the matches shrank so far. */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the compat header's offsets to the compat layout. */
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
/*
 * Look up (auto-loading the module if needed) the xt_match named in a
 * compat rule, pin it in m->u.kernel.match, and add its native-vs-compat
 * size delta to *size.  *i counts successfully resolved matches so the
 * caller can release exactly that many on error.
 */
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;		/* holds a module reference */
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
/*
 * Drop the module reference taken by compat_find_calc_match().
 * When i is non-NULL it limits the release to the first *i matches
 * (post-decrement: once *i reaches 0 the remaining ones are skipped),
 * so partially-resolved rules can be unwound precisely.
 */
static void
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return;

	module_put(m->u.kernel.match->me);
}
/*
 * Release every module reference held by one compat rule (all matches
 * plus the target).  Like compat_release_match(), a non-NULL i limits
 * the release to the first *i entries of the iteration.
 */
static void
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
/*
 * First translation pass over one compat rule: validate alignment and
 * size bounds, resolve its matches and target (pinning their modules),
 * record the native-layout size delta with xt_compat_add_offset(), and
 * note hook entry/underflow positions.  On success *i is bumped so the
 * caller knows how many rules were fully resolved.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Reject misaligned entries and entries whose header would run
	 * past the end of the user-supplied blob. */
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much larger this rule becomes in native
	 * layout (header delta + per-match/target deltas). */
	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;	/* holds a module reference */

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* NOTE(review): e is a compat entry, yet the error path walks it
	 * with the native-layout IPT_MATCH_ITERATE — looks like it should
	 * be COMPAT_IPT_MATCH_ITERATE; verify against upstream history. */
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
/*
 * Second translation pass: expand one already-validated compat rule
 * into native layout at *dstptr (matches/target were resolved by the
 * first pass).  Offsets in the new entry and in newinfo's hook tables
 * are adjusted by the cumulative growth (origsize - *size, which is
 * negative growth since *size increases).
 */
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	/* Copy the (smaller) compat header into the native slot; the
	 * counters field lives at a different offset, so copy it too. */
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Every hook entry/underflow that lies beyond this rule moves by
	 * the same growth amount. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
/*
 * Final per-rule check after compat translation: run every match's and
 * the target's checkentry hooks against the now-native entry.  On
 * failure, cleanup_match undoes exactly the j matches already checked.
 */
static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
/*
 * Convert a whole compat-format table blob (entry0/total_size, wrapped
 * in *pinfo) into a freshly allocated native-format xt_table_info.
 * Two passes: validate + size (check_compat_entry_size_and_hooks),
 * then expand (compat_copy_entry_from_user).  On success *pinfo and
 * *pentry0 are repointed at the new table and the old one is freed;
 * on failure all module references taken so far are dropped.
 */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* Expand into this CPU's copy; other CPUs get memcpy'd below. */
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	/* Offset table is only needed during translation. */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		/* i rules passed compat_check_entry, j - i did not get
		 * that far: release the latter via the compat blob and
		 * clean up the former via the native one. */
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
1804 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1807 struct compat_ipt_replace tmp
;
1808 struct xt_table_info
*newinfo
;
1809 void *loc_cpu_entry
;
1811 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1814 /* overflow check */
1815 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1817 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1820 newinfo
= xt_alloc_table_info(tmp
.size
);
1824 /* choose the copy that is on our node/cpu */
1825 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1826 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1832 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1833 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1834 tmp
.num_entries
, tmp
.hook_entry
,
1839 duprintf("compat_do_replace: Translated table\n");
1841 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1842 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1844 goto free_newinfo_untrans
;
1847 free_newinfo_untrans
:
1848 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1850 xt_free_table_info(newinfo
);
/*
 * Compat setsockopt() dispatcher: routes the two write commands to the
 * compat-aware handlers (do_add_counters gets compat=1).  Requires
 * CAP_NET_ADMIN.
 */
static int
compat_do_ipt_set_ctl(struct sock *sk,	int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		/* compat=1: counters arrive in 32-bit layout */
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * 32-bit ABI image of struct ipt_get_entries: header userland passes
 * to IPT_SO_GET_ENTRIES, followed by the buffer the kernel fills with
 * compat-format rules.
 */
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];	/* which table */
	compat_uint_t size;			/* caller's buffer size */
	struct compat_ipt_entry entrytable[0];	/* filled by the kernel */
};
/*
 * Dump the whole table to a compat userland buffer: snapshot the
 * counters, then stream every rule out via compat_copy_entry_to_user.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1918 compat_get_entries(struct net
*net
, struct compat_ipt_get_entries __user
*uptr
,
1922 struct compat_ipt_get_entries get
;
1925 if (*len
< sizeof(get
)) {
1926 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1930 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1933 if (*len
!= sizeof(struct compat_ipt_get_entries
) + get
.size
) {
1934 duprintf("compat_get_entries: %u != %zu\n",
1935 *len
, sizeof(get
) + get
.size
);
1939 xt_compat_lock(AF_INET
);
1940 t
= xt_find_table_lock(net
, AF_INET
, get
.name
);
1941 if (t
&& !IS_ERR(t
)) {
1942 const struct xt_table_info
*private = t
->private;
1943 struct xt_table_info info
;
1944 duprintf("t->private->number = %u\n", private->number
);
1945 ret
= compat_table_info(private, &info
);
1946 if (!ret
&& get
.size
== info
.size
) {
1947 ret
= compat_copy_entries_to_user(private->size
,
1948 t
, uptr
->entrytable
);
1950 duprintf("compat_get_entries: I've got %u not %u!\n",
1951 private->size
, get
.size
);
1954 xt_compat_flush_offsets(AF_INET
);
1958 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1960 xt_compat_unlock(AF_INET
);
1964 static int do_ipt_get_ctl(struct sock
*, int, void __user
*, int *);
/*
 * Compat getsockopt() dispatcher: GET_INFO and GET_ENTRIES need compat
 * handling; everything else (revision queries) is layout-identical and
 * falls through to the native do_ipt_get_ctl().  Requires CAP_NET_ADMIN.
 */
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1); /* compat=1 */
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
/*
 * Native setsockopt() dispatcher for ip_tables: table replacement and
 * counter addition (compat=0).  Requires CAP_NET_ADMIN.
 */
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Native getsockopt() dispatcher: table info, rule dump, and match/
 * target revision queries (the latter auto-load "ipt_<name>" modules
 * as needed).  Requires CAP_NET_ADMIN.
 */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		/* target=1 queries target revisions, 0 queries matches */
		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		ret = try_then_request_module(xt_find_revision(AF_INET,
							       rev.name,
							       rev.revision,
							       target, &ret),
					      "ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Register a table in namespace @net from the blueprint @repl: allocate
 * per-CPU rule storage, translate/validate the initial ruleset, then
 * hand it to xt_register_table().  Returns the live table or ERR_PTR.
 */
struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Empty placeholder xt_table_info swapped out by xt_register_table. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
/*
 * Tear down a registered table: detach it from x_tables, run each
 * rule's cleanup (dropping match/target module refs), release the
 * extra module ref held for non-initial entries, and free the storage.
 */
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	/* Tables with more than their built-in rules pinned the owner. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool hit;

	/* 0xFF is the wildcard type: any type/code pair matches. */
	if (test_type == 0xFF)
		hit = true;
	else
		hit = type == test_type &&
		      code >= min_code && code <= max_code;

	/* XOR folds in the user's inversion flag. */
	return hit ^ invert;
}
/*
 * Match callback for the built-in "icmp" match: compares the packet's
 * ICMP type/code against the rule's configured range.  Non-first
 * fragments can't be inspected and never match; a truncated header
 * hot-drops the packet.
 */
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/*
 * checkentry callback for the "icmp" match: a rule is valid iff it
 * sets no inversion flags beyond the one we understand (IPT_ICMP_INV).
 */
static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
/*
 * "standard" target: carries only a verdict int (ACCEPT/DROP/jump);
 * no target function — the verdict is handled by the traverser.
 */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	/* Verdict ints need 32<->64-bit translation for compat userland. */
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
/*
 * "ERROR" target: marks chain heads / table errors; hitting it at
 * runtime invokes ipt_error().
 */
static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,	/* carries the chain name */
	.family		= AF_INET,
};
/*
 * get/setsockopt registration for the IPT_SO_* option range, with
 * compat entry points for 32-bit userland when CONFIG_COMPAT is set.
 */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
/* The built-in "icmp" protocol match, registered with x_tables below. */
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,	/* only consulted for ICMP packets */
	.family		= AF_INET,
};
/* Per-netns setup: create the AF_INET x_tables state (e.g. /proc files). */
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET);
}
/* Per-netns teardown: undo ip_tables_net_init(). */
static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET);
}
/* Hooks run for every network namespace as it is created/destroyed. */
static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
/*
 * Module init: register pernet state, the two built-in targets, the
 * icmp match, and finally the sockopt interface.  Each failure label
 * unwinds exactly the registrations made before it, in reverse order.
 */
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
/* Module exit: unregister everything ip_tables_init() set up, in reverse. */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}
2279 EXPORT_SYMBOL(ipt_register_table
);
2280 EXPORT_SYMBOL(ipt_unregister_table
);
2281 EXPORT_SYMBOL(ipt_do_table
);
2282 module_init(ip_tables_init
);
2283 module_exit(ip_tables_fini
);