/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
73 return xt_alloc_initial_table(ip6t
, IP6T
);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
84 Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr
)
90 return ( (nexthdr
== IPPROTO_HOPOPTS
) ||
91 (nexthdr
== IPPROTO_ROUTING
) ||
92 (nexthdr
== IPPROTO_FRAGMENT
) ||
93 (nexthdr
== IPPROTO_ESP
) ||
94 (nexthdr
== IPPROTO_AH
) ||
95 (nexthdr
== IPPROTO_NONE
) ||
96 (nexthdr
== IPPROTO_DSTOPTS
) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff
*skb
,
105 const struct ip6t_ip6
*ip6info
,
106 unsigned int *protoff
,
107 int *fragoff
, bool *hotdrop
)
110 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
115 &ip6info
->src
), IP6T_INV_SRCIP
) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
117 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
131 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev
, ip6info
->iniface
,
134 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
138 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
140 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev
, ip6info
->outiface
,
143 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info
->flags
& IP6T_F_PROTO
)) {
152 unsigned short _frag_off
;
154 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
);
160 *fragoff
= _frag_off
;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
167 if (ip6info
->proto
== protohdr
) {
168 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info
->proto
!= 0) &&
176 !(ip6info
->invflags
& IP6T_INV_PROTO
))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
186 if (ipv6
->flags
& ~IP6T_F_MASK
) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6
->flags
& ~IP6T_F_MASK
);
191 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6
->invflags
& ~IP6T_INV_MASK
);
200 ip6t_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par
->targinfo
);
209 /* Performance critical - called for every packet */
211 do_match(const struct ip6t_entry_match
*m
, const struct sk_buff
*skb
,
212 struct xt_match_param
*par
)
214 par
->match
= m
->u
.kernel
.match
;
215 par
->matchinfo
= m
->data
;
217 /* Stop iteration if it doesn't match */
218 if (!m
->u
.kernel
.match
->match(skb
, par
))
/* Translate a byte offset inside the table blob into an entry pointer.
 * Uses char * arithmetic so no GNU void *-arithmetic extension is needed.
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *p = base;

	return (struct ip6t_entry *)(p + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
234 static const struct ip6t_ip6 uncond
;
236 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* Const-correct wrapper around ip6t_get_target(); the cast only drops
 * the const qualifier, it never writes through the pointer.
 */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *entry = (struct ip6t_entry *)e;

	return ip6t_get_target(entry);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames
[] = {
249 [NF_INET_PRE_ROUTING
] = "PREROUTING",
250 [NF_INET_LOCAL_IN
] = "INPUT",
251 [NF_INET_FORWARD
] = "FORWARD",
252 [NF_INET_LOCAL_OUT
] = "OUTPUT",
253 [NF_INET_POST_ROUTING
] = "POSTROUTING",
256 enum nf_ip_trace_comments
{
257 NF_IP6_TRACE_COMMENT_RULE
,
258 NF_IP6_TRACE_COMMENT_RETURN
,
259 NF_IP6_TRACE_COMMENT_POLICY
,
262 static const char *const comments
[] = {
263 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
268 static struct nf_loginfo trace_loginfo
= {
269 .type
= NF_LOG_TYPE_LOG
,
273 .logflags
= NF_LOG_MASK
,
278 /* Mildly perf critical (only if packet tracing is on) */
280 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
281 const char *hookname
, const char **chainname
,
282 const char **comment
, unsigned int *rulenum
)
284 const struct ip6t_standard_target
*t
= (void *)ip6t_get_target_c(s
);
286 if (strcmp(t
->target
.u
.kernel
.target
->name
, IP6T_ERROR_TARGET
) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname
= t
->target
.data
;
293 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
294 strcmp(t
->target
.u
.kernel
.target
->name
,
295 IP6T_STANDARD_TARGET
) == 0 &&
297 unconditional(&s
->ipv6
)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment
= *chainname
== hookname
300 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
301 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
310 static void trace_packet(const struct sk_buff
*skb
,
312 const struct net_device
*in
,
313 const struct net_device
*out
,
314 const char *tablename
,
315 const struct xt_table_info
*private,
316 const struct ip6t_entry
*e
)
318 const void *table_base
;
319 const struct ip6t_entry
*root
;
320 const char *hookname
, *chainname
, *comment
;
321 const struct ip6t_entry
*iter
;
322 unsigned int rulenum
= 0;
324 table_base
= private->entries
[smp_processor_id()];
325 root
= get_entry(table_base
, private->hook_entry
[hook
]);
327 hookname
= chainname
= hooknames
[hook
];
328 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
330 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
331 if (get_chainname_rulenum(iter
, e
, hookname
,
332 &chainname
, &comment
, &rulenum
) != 0)
335 nf_log_packet(AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
336 "TRACE: %s:%s:%s:%u ",
337 tablename
, chainname
, comment
, rulenum
);
341 static inline __pure
struct ip6t_entry
*
342 ip6t_next_entry(const struct ip6t_entry
*entry
)
344 return (void *)entry
+ entry
->next_offset
;
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
349 ip6t_do_table(struct sk_buff
*skb
,
351 const struct net_device
*in
,
352 const struct net_device
*out
,
353 struct xt_table
*table
)
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
357 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
358 bool hotdrop
= false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict
= NF_DROP
;
361 const char *indev
, *outdev
;
362 const void *table_base
;
363 struct ip6t_entry
*e
, *back
;
364 const struct xt_table_info
*private;
365 struct xt_match_param mtpar
;
366 struct xt_target_param tgpar
;
369 indev
= in
? in
->name
: nulldevname
;
370 outdev
= out
? out
->name
: nulldevname
;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
377 mtpar
.hotdrop
= &hotdrop
;
378 mtpar
.in
= tgpar
.in
= in
;
379 mtpar
.out
= tgpar
.out
= out
;
380 mtpar
.family
= tgpar
.family
= NFPROTO_IPV6
;
381 mtpar
.hooknum
= tgpar
.hooknum
= hook
;
383 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
386 private = table
->private;
387 table_base
= private->entries
[smp_processor_id()];
389 e
= get_entry(table_base
, private->hook_entry
[hook
]);
391 /* For return from builtin chain */
392 back
= get_entry(table_base
, private->underflow
[hook
]);
395 const struct ip6t_entry_target
*t
;
399 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
400 &mtpar
.thoff
, &mtpar
.fragoff
, &hotdrop
) ||
401 IP6T_MATCH_ITERATE(e
, do_match
, skb
, &mtpar
) != 0) {
402 e
= ip6t_next_entry(e
);
406 ADD_COUNTER(e
->counters
,
407 ntohs(ipv6_hdr(skb
)->payload_len
) +
408 sizeof(struct ipv6hdr
), 1);
410 t
= ip6t_get_target_c(e
);
411 IP_NF_ASSERT(t
->u
.kernel
.target
);
413 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
414 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
415 /* The packet is traced: log it */
416 if (unlikely(skb
->nf_trace
))
417 trace_packet(skb
, hook
, in
, out
,
418 table
->name
, private, e
);
420 /* Standard target? */
421 if (!t
->u
.kernel
.target
->target
) {
424 v
= ((struct ip6t_standard_target
*)t
)->verdict
;
426 /* Pop from stack? */
427 if (v
!= IP6T_RETURN
) {
428 verdict
= (unsigned)(-v
) - 1;
432 back
= get_entry(table_base
, back
->comefrom
);
435 if (table_base
+ v
!= ip6t_next_entry(e
) &&
436 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
437 /* Save old back ptr in next entry */
438 struct ip6t_entry
*next
= ip6t_next_entry(e
);
439 next
->comefrom
= (void *)back
- table_base
;
440 /* set back pointer to next entry */
444 e
= get_entry(table_base
, v
);
448 /* Targets which reenter must return
450 tgpar
.target
= t
->u
.kernel
.target
;
451 tgpar
.targinfo
= t
->data
;
453 #ifdef CONFIG_NETFILTER_DEBUG
454 tb_comefrom
= 0xeeeeeeec;
456 verdict
= t
->u
.kernel
.target
->target(skb
, &tgpar
);
458 #ifdef CONFIG_NETFILTER_DEBUG
459 if (tb_comefrom
!= 0xeeeeeeec && verdict
== IP6T_CONTINUE
) {
460 printk("Target %s reentered!\n",
461 t
->u
.kernel
.target
->name
);
464 tb_comefrom
= 0x57acc001;
466 if (verdict
== IP6T_CONTINUE
)
467 e
= ip6t_next_entry(e
);
473 #ifdef CONFIG_NETFILTER_DEBUG
474 tb_comefrom
= NETFILTER_LINK_POISON
;
476 xt_info_rdunlock_bh();
478 #ifdef DEBUG_ALLOW_ALL
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
492 mark_source_chains(const struct xt_table_info
*newinfo
,
493 unsigned int valid_hooks
, void *entry0
)
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
500 unsigned int pos
= newinfo
->hook_entry
[hook
];
501 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
503 if (!(valid_hooks
& (1 << hook
)))
506 /* Set initial back pointer. */
507 e
->counters
.pcnt
= pos
;
510 const struct ip6t_standard_target
*t
511 = (void *)ip6t_get_target_c(e
);
512 int visited
= e
->comefrom
& (1 << hook
);
514 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook
, pos
, e
->comefrom
);
519 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
521 /* Unconditional return/END. */
522 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
523 (strcmp(t
->target
.u
.user
.name
,
524 IP6T_STANDARD_TARGET
) == 0) &&
526 unconditional(&e
->ipv6
)) || visited
) {
527 unsigned int oldpos
, size
;
529 if ((strcmp(t
->target
.u
.user
.name
,
530 IP6T_STANDARD_TARGET
) == 0) &&
531 t
->verdict
< -NF_MAX_VERDICT
- 1) {
532 duprintf("mark_source_chains: bad "
533 "negative verdict (%i)\n",
538 /* Return: backtrack through the last
541 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
542 #ifdef DEBUG_IP_FIREWALL_USER
544 & (1 << NF_INET_NUMHOOKS
)) {
545 duprintf("Back unset "
552 pos
= e
->counters
.pcnt
;
553 e
->counters
.pcnt
= 0;
555 /* We're at the start. */
559 e
= (struct ip6t_entry
*)
561 } while (oldpos
== pos
+ e
->next_offset
);
564 size
= e
->next_offset
;
565 e
= (struct ip6t_entry
*)
566 (entry0
+ pos
+ size
);
567 e
->counters
.pcnt
= pos
;
570 int newpos
= t
->verdict
;
572 if (strcmp(t
->target
.u
.user
.name
,
573 IP6T_STANDARD_TARGET
) == 0 &&
575 if (newpos
> newinfo
->size
-
576 sizeof(struct ip6t_entry
)) {
577 duprintf("mark_source_chains: "
578 "bad verdict (%i)\n",
582 /* This a jump; chase it. */
583 duprintf("Jump rule %u -> %u\n",
586 /* ... this is a fallthru */
587 newpos
= pos
+ e
->next_offset
;
589 e
= (struct ip6t_entry
*)
591 e
->counters
.pcnt
= pos
;
596 duprintf("Finished chain %u\n", hook
);
602 cleanup_match(struct ip6t_entry_match
*m
, struct net
*net
, unsigned int *i
)
604 struct xt_mtdtor_param par
;
606 if (i
&& (*i
)-- == 0)
610 par
.match
= m
->u
.kernel
.match
;
611 par
.matchinfo
= m
->data
;
612 par
.family
= NFPROTO_IPV6
;
613 if (par
.match
->destroy
!= NULL
)
614 par
.match
->destroy(&par
);
615 module_put(par
.match
->me
);
620 check_entry(const struct ip6t_entry
*e
, const char *name
)
622 const struct ip6t_entry_target
*t
;
624 if (!ip6_checkentry(&e
->ipv6
)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
629 if (e
->target_offset
+ sizeof(struct ip6t_entry_target
) >
633 t
= ip6t_get_target_c(e
);
634 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
640 static int check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
643 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
646 par
->match
= m
->u
.kernel
.match
;
647 par
->matchinfo
= m
->data
;
649 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
650 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
652 duprintf("ip_tables: check failed for `%s'.\n",
661 find_check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
664 struct xt_match
*match
;
667 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
669 "ip6t_%s", m
->u
.user
.name
);
670 if (IS_ERR(match
) || !match
) {
671 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
672 return match
? PTR_ERR(match
) : -ENOENT
;
674 m
->u
.kernel
.match
= match
;
676 ret
= check_match(m
, par
, i
);
682 module_put(m
->u
.kernel
.match
->me
);
686 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
688 struct ip6t_entry_target
*t
= ip6t_get_target(e
);
689 struct xt_tgchk_param par
= {
693 .target
= t
->u
.kernel
.target
,
695 .hook_mask
= e
->comefrom
,
696 .family
= NFPROTO_IPV6
,
700 t
= ip6t_get_target(e
);
701 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
702 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
704 duprintf("ip_tables: check failed for `%s'.\n",
705 t
->u
.kernel
.target
->name
);
712 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
713 unsigned int size
, unsigned int *i
)
715 struct ip6t_entry_target
*t
;
716 struct xt_target
*target
;
719 struct xt_mtchk_param mtpar
;
721 ret
= check_entry(e
, name
);
728 mtpar
.entryinfo
= &e
->ipv6
;
729 mtpar
.hook_mask
= e
->comefrom
;
730 mtpar
.family
= NFPROTO_IPV6
;
731 ret
= IP6T_MATCH_ITERATE(e
, find_check_match
, &mtpar
, &j
);
733 goto cleanup_matches
;
735 t
= ip6t_get_target(e
);
736 target
= try_then_request_module(xt_find_target(AF_INET6
,
739 "ip6t_%s", t
->u
.user
.name
);
740 if (IS_ERR(target
) || !target
) {
741 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
742 ret
= target
? PTR_ERR(target
) : -ENOENT
;
743 goto cleanup_matches
;
745 t
->u
.kernel
.target
= target
;
747 ret
= check_target(e
, net
, name
);
754 module_put(t
->u
.kernel
.target
->me
);
756 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, &j
);
760 static bool check_underflow(const struct ip6t_entry
*e
)
762 const struct ip6t_entry_target
*t
;
763 unsigned int verdict
;
765 if (!unconditional(&e
->ipv6
))
767 t
= ip6t_get_target_c(e
);
768 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
770 verdict
= ((struct ip6t_standard_target
*)t
)->verdict
;
771 verdict
= -verdict
- 1;
772 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
776 check_entry_size_and_hooks(struct ip6t_entry
*e
,
777 struct xt_table_info
*newinfo
,
778 const unsigned char *base
,
779 const unsigned char *limit
,
780 const unsigned int *hook_entries
,
781 const unsigned int *underflows
,
782 unsigned int valid_hooks
,
787 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
788 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
789 duprintf("Bad offset %p\n", e
);
794 < sizeof(struct ip6t_entry
) + sizeof(struct ip6t_entry_target
)) {
795 duprintf("checking: element %p size %u\n",
800 /* Check hooks & underflows */
801 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
802 if (!(valid_hooks
& (1 << h
)))
804 if ((unsigned char *)e
- base
== hook_entries
[h
])
805 newinfo
->hook_entry
[h
] = hook_entries
[h
];
806 if ((unsigned char *)e
- base
== underflows
[h
]) {
807 if (!check_underflow(e
)) {
808 pr_err("Underflows must be unconditional and "
809 "use the STANDARD target with "
813 newinfo
->underflow
[h
] = underflows
[h
];
817 /* Clear counters and comefrom */
818 e
->counters
= ((struct xt_counters
) { 0, 0 });
826 cleanup_entry(struct ip6t_entry
*e
, struct net
*net
, unsigned int *i
)
828 struct xt_tgdtor_param par
;
829 struct ip6t_entry_target
*t
;
831 if (i
&& (*i
)-- == 0)
834 /* Cleanup all matches */
835 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, NULL
);
836 t
= ip6t_get_target(e
);
839 par
.target
= t
->u
.kernel
.target
;
840 par
.targinfo
= t
->data
;
841 par
.family
= NFPROTO_IPV6
;
842 if (par
.target
->destroy
!= NULL
)
843 par
.target
->destroy(&par
);
844 module_put(par
.target
->me
);
848 /* Checks and translates the user-supplied table segment (held in
851 translate_table(struct net
*net
,
853 unsigned int valid_hooks
,
854 struct xt_table_info
*newinfo
,
858 const unsigned int *hook_entries
,
859 const unsigned int *underflows
)
861 struct ip6t_entry
*iter
;
865 newinfo
->size
= size
;
866 newinfo
->number
= number
;
868 /* Init all hooks to impossible value. */
869 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
870 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
871 newinfo
->underflow
[i
] = 0xFFFFFFFF;
874 duprintf("translate_table: size %u\n", newinfo
->size
);
876 /* Walk through entries, checking offsets. */
877 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
878 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
879 entry0
+ size
, hook_entries
, underflows
,
888 duprintf("translate_table: %u not %u entries\n",
893 /* Check hooks all assigned */
894 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
895 /* Only hooks which are valid */
896 if (!(valid_hooks
& (1 << i
)))
898 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
899 duprintf("Invalid hook entry %u %u\n",
903 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
904 duprintf("Invalid underflow %u %u\n",
910 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
913 /* Finally, each sanity check must pass */
915 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
916 ret
= find_check_entry(iter
, net
, name
, size
, &i
);
922 xt_entry_foreach(iter
, entry0
, newinfo
->size
)
923 if (cleanup_entry(iter
, net
, &i
) != 0)
928 /* And one copy for every other CPU */
929 for_each_possible_cpu(i
) {
930 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
931 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
939 add_entry_to_counter(const struct ip6t_entry
*e
,
940 struct xt_counters total
[],
943 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
950 set_entry_to_counter(const struct ip6t_entry
*e
,
951 struct ip6t_counters total
[],
954 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
961 get_counters(const struct xt_table_info
*t
,
962 struct xt_counters counters
[])
964 struct ip6t_entry
*iter
;
969 /* Instead of clearing (by a previous call to memset())
970 * the counters and using adds, we set the counters
971 * with data used by 'current' CPU
973 * Bottom half has to be disabled to prevent deadlock
974 * if new softirq were to run and call ipt_do_table
977 curcpu
= smp_processor_id();
980 xt_entry_foreach(iter
, t
->entries
[curcpu
], t
->size
)
981 if (set_entry_to_counter(iter
, counters
, &i
) != 0)
984 for_each_possible_cpu(cpu
) {
989 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
)
990 if (add_entry_to_counter(iter
, counters
, &i
) != 0)
992 xt_info_wrunlock(cpu
);
997 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
999 unsigned int countersize
;
1000 struct xt_counters
*counters
;
1001 const struct xt_table_info
*private = table
->private;
1003 /* We need atomic snapshot of counters: rest doesn't change
1004 (other than comefrom, which userspace doesn't care
1006 countersize
= sizeof(struct xt_counters
) * private->number
;
1007 counters
= vmalloc_node(countersize
, numa_node_id());
1009 if (counters
== NULL
)
1010 return ERR_PTR(-ENOMEM
);
1012 get_counters(private, counters
);
1018 copy_entries_to_user(unsigned int total_size
,
1019 const struct xt_table
*table
,
1020 void __user
*userptr
)
1022 unsigned int off
, num
;
1023 const struct ip6t_entry
*e
;
1024 struct xt_counters
*counters
;
1025 const struct xt_table_info
*private = table
->private;
1027 const void *loc_cpu_entry
;
1029 counters
= alloc_counters(table
);
1030 if (IS_ERR(counters
))
1031 return PTR_ERR(counters
);
1033 /* choose the copy that is on our node/cpu, ...
1034 * This choice is lazy (because current thread is
1035 * allowed to migrate to another cpu)
1037 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1038 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
1043 /* FIXME: use iterator macros --RR */
1044 /* ... then go back and fix counters and names */
1045 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
1047 const struct ip6t_entry_match
*m
;
1048 const struct ip6t_entry_target
*t
;
1050 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
1051 if (copy_to_user(userptr
+ off
1052 + offsetof(struct ip6t_entry
, counters
),
1054 sizeof(counters
[num
])) != 0) {
1059 for (i
= sizeof(struct ip6t_entry
);
1060 i
< e
->target_offset
;
1061 i
+= m
->u
.match_size
) {
1064 if (copy_to_user(userptr
+ off
+ i
1065 + offsetof(struct ip6t_entry_match
,
1067 m
->u
.kernel
.match
->name
,
1068 strlen(m
->u
.kernel
.match
->name
)+1)
1075 t
= ip6t_get_target_c(e
);
1076 if (copy_to_user(userptr
+ off
+ e
->target_offset
1077 + offsetof(struct ip6t_entry_target
,
1079 t
->u
.kernel
.target
->name
,
1080 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1091 #ifdef CONFIG_COMPAT
1092 static void compat_standard_from_user(void *dst
, const void *src
)
1094 int v
= *(compat_int_t
*)src
;
1097 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1098 memcpy(dst
, &v
, sizeof(v
));
1101 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1103 compat_int_t cv
= *(int *)src
;
1106 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1107 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1111 compat_calc_match(const struct ip6t_entry_match
*m
, int *size
)
1113 *size
+= xt_compat_match_offset(m
->u
.kernel
.match
);
1117 static int compat_calc_entry(const struct ip6t_entry
*e
,
1118 const struct xt_table_info
*info
,
1119 const void *base
, struct xt_table_info
*newinfo
)
1121 const struct ip6t_entry_target
*t
;
1122 unsigned int entry_offset
;
1125 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1126 entry_offset
= (void *)e
- base
;
1127 IP6T_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1128 t
= ip6t_get_target_c(e
);
1129 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1130 newinfo
->size
-= off
;
1131 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1135 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1136 if (info
->hook_entry
[i
] &&
1137 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1138 newinfo
->hook_entry
[i
] -= off
;
1139 if (info
->underflow
[i
] &&
1140 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1141 newinfo
->underflow
[i
] -= off
;
1146 static int compat_table_info(const struct xt_table_info
*info
,
1147 struct xt_table_info
*newinfo
)
1149 struct ip6t_entry
*iter
;
1150 void *loc_cpu_entry
;
1153 if (!newinfo
|| !info
)
1156 /* we dont care about newinfo->entries[] */
1157 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1158 newinfo
->initial_entries
= 0;
1159 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1160 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1161 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1169 static int get_info(struct net
*net
, void __user
*user
,
1170 const int *len
, int compat
)
1172 char name
[IP6T_TABLE_MAXNAMELEN
];
1176 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1177 duprintf("length %u != %zu\n", *len
,
1178 sizeof(struct ip6t_getinfo
));
1182 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1185 name
[IP6T_TABLE_MAXNAMELEN
-1] = '\0';
1186 #ifdef CONFIG_COMPAT
1188 xt_compat_lock(AF_INET6
);
1190 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1191 "ip6table_%s", name
);
1192 if (t
&& !IS_ERR(t
)) {
1193 struct ip6t_getinfo info
;
1194 const struct xt_table_info
*private = t
->private;
1195 #ifdef CONFIG_COMPAT
1196 struct xt_table_info tmp
;
1199 ret
= compat_table_info(private, &tmp
);
1200 xt_compat_flush_offsets(AF_INET6
);
1204 info
.valid_hooks
= t
->valid_hooks
;
1205 memcpy(info
.hook_entry
, private->hook_entry
,
1206 sizeof(info
.hook_entry
));
1207 memcpy(info
.underflow
, private->underflow
,
1208 sizeof(info
.underflow
));
1209 info
.num_entries
= private->number
;
1210 info
.size
= private->size
;
1211 strcpy(info
.name
, name
);
1213 if (copy_to_user(user
, &info
, *len
) != 0)
1221 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1222 #ifdef CONFIG_COMPAT
1224 xt_compat_unlock(AF_INET6
);
1230 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1234 struct ip6t_get_entries get
;
1237 if (*len
< sizeof(get
)) {
1238 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1241 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1243 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1244 duprintf("get_entries: %u != %zu\n",
1245 *len
, sizeof(get
) + get
.size
);
1249 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1250 if (t
&& !IS_ERR(t
)) {
1251 struct xt_table_info
*private = t
->private;
1252 duprintf("t->private->number = %u\n", private->number
);
1253 if (get
.size
== private->size
)
1254 ret
= copy_entries_to_user(private->size
,
1255 t
, uptr
->entrytable
);
1257 duprintf("get_entries: I've got %u not %u!\n",
1258 private->size
, get
.size
);
1264 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1270 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1271 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1272 void __user
*counters_ptr
)
1276 struct xt_table_info
*oldinfo
;
1277 struct xt_counters
*counters
;
1278 const void *loc_cpu_old_entry
;
1279 struct ip6t_entry
*iter
;
1282 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
1289 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1290 "ip6table_%s", name
);
1291 if (!t
|| IS_ERR(t
)) {
1292 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1293 goto free_newinfo_counters_untrans
;
1297 if (valid_hooks
!= t
->valid_hooks
) {
1298 duprintf("Valid hook crap: %08X vs %08X\n",
1299 valid_hooks
, t
->valid_hooks
);
1304 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1308 /* Update module usage count based on number of rules */
1309 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1310 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1311 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1312 (newinfo
->number
<= oldinfo
->initial_entries
))
1314 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1315 (newinfo
->number
<= oldinfo
->initial_entries
))
1318 /* Get the old counters, and synchronize with replace */
1319 get_counters(oldinfo
, counters
);
1321 /* Decrease module usage counts and free resource */
1322 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1323 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1324 if (cleanup_entry(iter
, net
, NULL
) != 0)
1327 xt_free_table_info(oldinfo
);
1328 if (copy_to_user(counters_ptr
, counters
,
1329 sizeof(struct xt_counters
) * num_counters
) != 0)
1338 free_newinfo_counters_untrans
:
1345 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1348 struct ip6t_replace tmp
;
1349 struct xt_table_info
*newinfo
;
1350 void *loc_cpu_entry
;
1351 struct ip6t_entry
*iter
;
1353 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1356 /* overflow check */
1357 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1360 newinfo
= xt_alloc_table_info(tmp
.size
);
1364 /* choose the copy that is on our node/cpu */
1365 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1366 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1372 ret
= translate_table(net
, tmp
.name
, tmp
.valid_hooks
,
1373 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1374 tmp
.hook_entry
, tmp
.underflow
);
1378 duprintf("ip_tables: Translated table\n");
1380 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1381 tmp
.num_counters
, tmp
.counters
);
1383 goto free_newinfo_untrans
;
1386 free_newinfo_untrans
:
1387 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1388 if (cleanup_entry(iter
, net
, NULL
) != 0)
1391 xt_free_table_info(newinfo
);
1395 /* We're lazy, and add to the first CPU; overflow works its fey magic
1396 * and everything is OK. */
1398 add_counter_to_entry(struct ip6t_entry
*e
,
1399 const struct xt_counters addme
[],
1402 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1409 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1412 unsigned int i
, curcpu
;
1413 struct xt_counters_info tmp
;
1414 struct xt_counters
*paddc
;
1415 unsigned int num_counters
;
1420 const struct xt_table_info
*private;
1422 const void *loc_cpu_entry
;
1423 struct ip6t_entry
*iter
;
1424 #ifdef CONFIG_COMPAT
1425 struct compat_xt_counters_info compat_tmp
;
1429 size
= sizeof(struct compat_xt_counters_info
);
1434 size
= sizeof(struct xt_counters_info
);
1437 if (copy_from_user(ptmp
, user
, size
) != 0)
1440 #ifdef CONFIG_COMPAT
1442 num_counters
= compat_tmp
.num_counters
;
1443 name
= compat_tmp
.name
;
1447 num_counters
= tmp
.num_counters
;
1451 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1454 paddc
= vmalloc_node(len
- size
, numa_node_id());
1458 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1463 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1464 if (!t
|| IS_ERR(t
)) {
1465 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1471 private = t
->private;
1472 if (private->number
!= num_counters
) {
1474 goto unlock_up_free
;
1478 /* Choose the copy that is on our node */
1479 curcpu
= smp_processor_id();
1480 xt_info_wrlock(curcpu
);
1481 loc_cpu_entry
= private->entries
[curcpu
];
1482 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
1483 if (add_counter_to_entry(iter
, paddc
, &i
) != 0)
1485 xt_info_wrunlock(curcpu
);
1497 #ifdef CONFIG_COMPAT
1498 struct compat_ip6t_replace
{
1499 char name
[IP6T_TABLE_MAXNAMELEN
];
1503 u32 hook_entry
[NF_INET_NUMHOOKS
];
1504 u32 underflow
[NF_INET_NUMHOOKS
];
1506 compat_uptr_t counters
; /* struct ip6t_counters * */
1507 struct compat_ip6t_entry entries
[0];
1511 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1512 unsigned int *size
, struct xt_counters
*counters
,
1515 struct ip6t_entry_target
*t
;
1516 struct compat_ip6t_entry __user
*ce
;
1517 u_int16_t target_offset
, next_offset
;
1518 compat_uint_t origsize
;
1523 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1524 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)))
1527 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
1530 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1531 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1533 ret
= IP6T_MATCH_ITERATE(e
, xt_compat_match_to_user
, dstptr
, size
);
1534 target_offset
= e
->target_offset
- (origsize
- *size
);
1537 t
= ip6t_get_target(e
);
1538 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1542 next_offset
= e
->next_offset
- (origsize
- *size
);
1543 if (put_user(target_offset
, &ce
->target_offset
))
1545 if (put_user(next_offset
, &ce
->next_offset
))
1555 compat_find_calc_match(struct ip6t_entry_match
*m
,
1557 const struct ip6t_ip6
*ipv6
,
1558 unsigned int hookmask
,
1559 int *size
, unsigned int *i
)
1561 struct xt_match
*match
;
1563 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
1564 m
->u
.user
.revision
),
1565 "ip6t_%s", m
->u
.user
.name
);
1566 if (IS_ERR(match
) || !match
) {
1567 duprintf("compat_check_calc_match: `%s' not found\n",
1569 return match
? PTR_ERR(match
) : -ENOENT
;
1571 m
->u
.kernel
.match
= match
;
1572 *size
+= xt_compat_match_offset(match
);
1579 compat_release_match(struct ip6t_entry_match
*m
, unsigned int *i
)
1581 if (i
&& (*i
)-- == 0)
1584 module_put(m
->u
.kernel
.match
->me
);
1589 compat_release_entry(struct compat_ip6t_entry
*e
, unsigned int *i
)
1591 struct ip6t_entry_target
*t
;
1593 if (i
&& (*i
)-- == 0)
1596 /* Cleanup all matches */
1597 COMPAT_IP6T_MATCH_ITERATE(e
, compat_release_match
, NULL
);
1598 t
= compat_ip6t_get_target(e
);
1599 module_put(t
->u
.kernel
.target
->me
);
1604 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1605 struct xt_table_info
*newinfo
,
1607 const unsigned char *base
,
1608 const unsigned char *limit
,
1609 const unsigned int *hook_entries
,
1610 const unsigned int *underflows
,
1614 struct ip6t_entry_target
*t
;
1615 struct xt_target
*target
;
1616 unsigned int entry_offset
;
1620 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1621 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1622 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1623 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1627 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1628 sizeof(struct compat_xt_entry_target
)) {
1629 duprintf("checking: element %p size %u\n",
1634 /* For purposes of check_entry casting the compat entry is fine */
1635 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1639 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1640 entry_offset
= (void *)e
- (void *)base
;
1642 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, compat_find_calc_match
, name
,
1643 &e
->ipv6
, e
->comefrom
, &off
, &j
);
1645 goto release_matches
;
1647 t
= compat_ip6t_get_target(e
);
1648 target
= try_then_request_module(xt_find_target(AF_INET6
,
1650 t
->u
.user
.revision
),
1651 "ip6t_%s", t
->u
.user
.name
);
1652 if (IS_ERR(target
) || !target
) {
1653 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1655 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1656 goto release_matches
;
1658 t
->u
.kernel
.target
= target
;
1660 off
+= xt_compat_target_offset(target
);
1662 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1666 /* Check hooks & underflows */
1667 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1668 if ((unsigned char *)e
- base
== hook_entries
[h
])
1669 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1670 if ((unsigned char *)e
- base
== underflows
[h
])
1671 newinfo
->underflow
[h
] = underflows
[h
];
1674 /* Clear counters and comefrom */
1675 memset(&e
->counters
, 0, sizeof(e
->counters
));
1682 module_put(t
->u
.kernel
.target
->me
);
1684 IP6T_MATCH_ITERATE(e
, compat_release_match
, &j
);
1689 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1690 unsigned int *size
, const char *name
,
1691 struct xt_table_info
*newinfo
, unsigned char *base
)
1693 struct ip6t_entry_target
*t
;
1694 struct xt_target
*target
;
1695 struct ip6t_entry
*de
;
1696 unsigned int origsize
;
1701 de
= (struct ip6t_entry
*)*dstptr
;
1702 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1703 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1705 *dstptr
+= sizeof(struct ip6t_entry
);
1706 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1708 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, xt_compat_match_from_user
,
1712 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1713 t
= compat_ip6t_get_target(e
);
1714 target
= t
->u
.kernel
.target
;
1715 xt_compat_target_from_user(t
, dstptr
, size
);
1717 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1718 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1719 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1720 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1721 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1722 newinfo
->underflow
[h
] -= origsize
- *size
;
1727 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1728 const char *name
, unsigned int *i
)
1732 struct xt_mtchk_param mtpar
;
1737 mtpar
.entryinfo
= &e
->ipv6
;
1738 mtpar
.hook_mask
= e
->comefrom
;
1739 mtpar
.family
= NFPROTO_IPV6
;
1740 ret
= IP6T_MATCH_ITERATE(e
, check_match
, &mtpar
, &j
);
1742 goto cleanup_matches
;
1744 ret
= check_target(e
, net
, name
);
1746 goto cleanup_matches
;
1752 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, &j
);
1757 translate_compat_table(struct net
*net
,
1759 unsigned int valid_hooks
,
1760 struct xt_table_info
**pinfo
,
1762 unsigned int total_size
,
1763 unsigned int number
,
1764 unsigned int *hook_entries
,
1765 unsigned int *underflows
)
1768 struct xt_table_info
*newinfo
, *info
;
1769 void *pos
, *entry0
, *entry1
;
1770 struct compat_ip6t_entry
*iter0
;
1771 struct ip6t_entry
*iter1
;
1778 info
->number
= number
;
1780 /* Init all hooks to impossible value. */
1781 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1782 info
->hook_entry
[i
] = 0xFFFFFFFF;
1783 info
->underflow
[i
] = 0xFFFFFFFF;
1786 duprintf("translate_compat_table: size %u\n", info
->size
);
1788 xt_compat_lock(AF_INET6
);
1789 /* Walk through entries, checking offsets. */
1790 xt_entry_foreach(iter0
, entry0
, total_size
) {
1791 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1792 entry0
, entry0
+ total_size
, hook_entries
, underflows
,
1802 duprintf("translate_compat_table: %u not %u entries\n",
1807 /* Check hooks all assigned */
1808 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1809 /* Only hooks which are valid */
1810 if (!(valid_hooks
& (1 << i
)))
1812 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1813 duprintf("Invalid hook entry %u %u\n",
1814 i
, hook_entries
[i
]);
1817 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1818 duprintf("Invalid underflow %u %u\n",
1825 newinfo
= xt_alloc_table_info(size
);
1829 newinfo
->number
= number
;
1830 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1831 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1832 newinfo
->underflow
[i
] = info
->underflow
[i
];
1834 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1837 xt_entry_foreach(iter0
, entry0
, total_size
) {
1838 ret
= compat_copy_entry_from_user(iter0
, &pos
,
1839 &size
, name
, newinfo
, entry1
);
1843 xt_compat_flush_offsets(AF_INET6
);
1844 xt_compat_unlock(AF_INET6
);
1849 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1853 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1854 ret
= compat_check_entry(iter1
, net
, name
, &i
);
1860 * The first i matches need cleanup_entry (calls ->destroy)
1861 * because they had called ->check already. The other j-i
1862 * entries need only release.
1866 xt_entry_foreach(iter0
, entry0
, newinfo
->size
) {
1869 if (compat_release_entry(iter0
, &j
) != 0)
1872 xt_entry_foreach(iter1
, entry1
, newinfo
->size
)
1873 if (cleanup_entry(iter1
, net
, &i
) != 0)
1875 xt_free_table_info(newinfo
);
1879 /* And one copy for every other CPU */
1880 for_each_possible_cpu(i
)
1881 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1882 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1886 xt_free_table_info(info
);
1890 xt_free_table_info(newinfo
);
1892 xt_entry_foreach(iter0
, entry0
, total_size
)
1893 if (compat_release_entry(iter0
, &j
) != 0)
1897 xt_compat_flush_offsets(AF_INET6
);
1898 xt_compat_unlock(AF_INET6
);
1903 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1906 struct compat_ip6t_replace tmp
;
1907 struct xt_table_info
*newinfo
;
1908 void *loc_cpu_entry
;
1909 struct ip6t_entry
*iter
;
1911 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1914 /* overflow check */
1915 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1917 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1920 newinfo
= xt_alloc_table_info(tmp
.size
);
1924 /* choose the copy that is on our node/cpu */
1925 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1926 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1932 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1933 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1934 tmp
.num_entries
, tmp
.hook_entry
,
1939 duprintf("compat_do_replace: Translated table\n");
1941 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1942 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1944 goto free_newinfo_untrans
;
1947 free_newinfo_untrans
:
1948 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1949 if (cleanup_entry(iter
, net
, NULL
) != 0)
1952 xt_free_table_info(newinfo
);
1957 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1962 if (!capable(CAP_NET_ADMIN
))
1966 case IP6T_SO_SET_REPLACE
:
1967 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1970 case IP6T_SO_SET_ADD_COUNTERS
:
1971 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1975 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1982 struct compat_ip6t_get_entries
{
1983 char name
[IP6T_TABLE_MAXNAMELEN
];
1985 struct compat_ip6t_entry entrytable
[0];
1989 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1990 void __user
*userptr
)
1992 struct xt_counters
*counters
;
1993 const struct xt_table_info
*private = table
->private;
1997 const void *loc_cpu_entry
;
1999 struct ip6t_entry
*iter
;
2001 counters
= alloc_counters(table
);
2002 if (IS_ERR(counters
))
2003 return PTR_ERR(counters
);
2005 /* choose the copy that is on our node/cpu, ...
2006 * This choice is lazy (because current thread is
2007 * allowed to migrate to another cpu)
2009 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2012 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
2013 ret
= compat_copy_entry_to_user(iter
, &pos
,
2014 &size
, counters
, &i
);
2024 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
2028 struct compat_ip6t_get_entries get
;
2031 if (*len
< sizeof(get
)) {
2032 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
2036 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
2039 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
2040 duprintf("compat_get_entries: %u != %zu\n",
2041 *len
, sizeof(get
) + get
.size
);
2045 xt_compat_lock(AF_INET6
);
2046 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
2047 if (t
&& !IS_ERR(t
)) {
2048 const struct xt_table_info
*private = t
->private;
2049 struct xt_table_info info
;
2050 duprintf("t->private->number = %u\n", private->number
);
2051 ret
= compat_table_info(private, &info
);
2052 if (!ret
&& get
.size
== info
.size
) {
2053 ret
= compat_copy_entries_to_user(private->size
,
2054 t
, uptr
->entrytable
);
2056 duprintf("compat_get_entries: I've got %u not %u!\n",
2057 private->size
, get
.size
);
2060 xt_compat_flush_offsets(AF_INET6
);
2064 ret
= t
? PTR_ERR(t
) : -ENOENT
;
2066 xt_compat_unlock(AF_INET6
);
2070 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
2073 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2077 if (!capable(CAP_NET_ADMIN
))
2081 case IP6T_SO_GET_INFO
:
2082 ret
= get_info(sock_net(sk
), user
, len
, 1);
2084 case IP6T_SO_GET_ENTRIES
:
2085 ret
= compat_get_entries(sock_net(sk
), user
, len
);
2088 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2095 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2099 if (!capable(CAP_NET_ADMIN
))
2103 case IP6T_SO_SET_REPLACE
:
2104 ret
= do_replace(sock_net(sk
), user
, len
);
2107 case IP6T_SO_SET_ADD_COUNTERS
:
2108 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2112 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2120 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2124 if (!capable(CAP_NET_ADMIN
))
2128 case IP6T_SO_GET_INFO
:
2129 ret
= get_info(sock_net(sk
), user
, len
, 0);
2132 case IP6T_SO_GET_ENTRIES
:
2133 ret
= get_entries(sock_net(sk
), user
, len
);
2136 case IP6T_SO_GET_REVISION_MATCH
:
2137 case IP6T_SO_GET_REVISION_TARGET
: {
2138 struct ip6t_get_revision rev
;
2141 if (*len
!= sizeof(rev
)) {
2145 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2150 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2155 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2158 "ip6t_%s", rev
.name
);
2163 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2170 struct xt_table
*ip6t_register_table(struct net
*net
,
2171 const struct xt_table
*table
,
2172 const struct ip6t_replace
*repl
)
2175 struct xt_table_info
*newinfo
;
2176 struct xt_table_info bootstrap
2177 = { 0, 0, 0, { 0 }, { 0 }, { } };
2178 void *loc_cpu_entry
;
2179 struct xt_table
*new_table
;
2181 newinfo
= xt_alloc_table_info(repl
->size
);
2187 /* choose the copy on our node/cpu, but dont care about preemption */
2188 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2189 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2191 ret
= translate_table(net
, table
->name
, table
->valid_hooks
,
2192 newinfo
, loc_cpu_entry
, repl
->size
,
2199 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2200 if (IS_ERR(new_table
)) {
2201 ret
= PTR_ERR(new_table
);
2207 xt_free_table_info(newinfo
);
2209 return ERR_PTR(ret
);
2212 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2214 struct xt_table_info
*private;
2215 void *loc_cpu_entry
;
2216 struct module
*table_owner
= table
->me
;
2217 struct ip6t_entry
*iter
;
2219 private = xt_unregister_table(table
);
2221 /* Decrease module usage counts and free resources */
2222 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2223 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2224 if (cleanup_entry(iter
, net
, NULL
) != 0)
2226 if (private->number
> private->initial_entries
)
2227 module_put(table_owner
);
2228 xt_free_table_info(private);
2231 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2233 icmp6_type_code_match(u_int8_t test_type
, u_int8_t min_code
, u_int8_t max_code
,
2234 u_int8_t type
, u_int8_t code
,
2237 return (type
== test_type
&& code
>= min_code
&& code
<= max_code
)
2242 icmp6_match(const struct sk_buff
*skb
, const struct xt_match_param
*par
)
2244 const struct icmp6hdr
*ic
;
2245 struct icmp6hdr _icmph
;
2246 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2248 /* Must not be a fragment. */
2249 if (par
->fragoff
!= 0)
2252 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2254 /* We've been asked to examine this packet, and we
2255 * can't. Hence, no choice but to drop.
2257 duprintf("Dropping evil ICMP tinygram.\n");
2258 *par
->hotdrop
= true;
2262 return icmp6_type_code_match(icmpinfo
->type
,
2265 ic
->icmp6_type
, ic
->icmp6_code
,
2266 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2269 /* Called when user tries to insert an entry of this type. */
2270 static bool icmp6_checkentry(const struct xt_mtchk_param
*par
)
2272 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2274 /* Must specify no unknown invflags */
2275 return !(icmpinfo
->invflags
& ~IP6T_ICMP_INV
);
2278 /* The built-in targets: standard (NULL) and error. */
2279 static struct xt_target ip6t_standard_target __read_mostly
= {
2280 .name
= IP6T_STANDARD_TARGET
,
2281 .targetsize
= sizeof(int),
2282 .family
= NFPROTO_IPV6
,
2283 #ifdef CONFIG_COMPAT
2284 .compatsize
= sizeof(compat_int_t
),
2285 .compat_from_user
= compat_standard_from_user
,
2286 .compat_to_user
= compat_standard_to_user
,
2290 static struct xt_target ip6t_error_target __read_mostly
= {
2291 .name
= IP6T_ERROR_TARGET
,
2292 .target
= ip6t_error
,
2293 .targetsize
= IP6T_FUNCTION_MAXNAMELEN
,
2294 .family
= NFPROTO_IPV6
,
2297 static struct nf_sockopt_ops ip6t_sockopts
= {
2299 .set_optmin
= IP6T_BASE_CTL
,
2300 .set_optmax
= IP6T_SO_SET_MAX
+1,
2301 .set
= do_ip6t_set_ctl
,
2302 #ifdef CONFIG_COMPAT
2303 .compat_set
= compat_do_ip6t_set_ctl
,
2305 .get_optmin
= IP6T_BASE_CTL
,
2306 .get_optmax
= IP6T_SO_GET_MAX
+1,
2307 .get
= do_ip6t_get_ctl
,
2308 #ifdef CONFIG_COMPAT
2309 .compat_get
= compat_do_ip6t_get_ctl
,
2311 .owner
= THIS_MODULE
,
2314 static struct xt_match icmp6_matchstruct __read_mostly
= {
2316 .match
= icmp6_match
,
2317 .matchsize
= sizeof(struct ip6t_icmp
),
2318 .checkentry
= icmp6_checkentry
,
2319 .proto
= IPPROTO_ICMPV6
,
2320 .family
= NFPROTO_IPV6
,
2323 static int __net_init
ip6_tables_net_init(struct net
*net
)
2325 return xt_proto_init(net
, NFPROTO_IPV6
);
2328 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2330 xt_proto_fini(net
, NFPROTO_IPV6
);
2333 static struct pernet_operations ip6_tables_net_ops
= {
2334 .init
= ip6_tables_net_init
,
2335 .exit
= ip6_tables_net_exit
,
2338 static int __init
ip6_tables_init(void)
2342 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2346 /* Noone else will be downing sem now, so we won't sleep */
2347 ret
= xt_register_target(&ip6t_standard_target
);
2350 ret
= xt_register_target(&ip6t_error_target
);
2353 ret
= xt_register_match(&icmp6_matchstruct
);
2357 /* Register setsockopt */
2358 ret
= nf_register_sockopt(&ip6t_sockopts
);
2362 printk(KERN_INFO
"ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2366 xt_unregister_match(&icmp6_matchstruct
);
2368 xt_unregister_target(&ip6t_error_target
);
2370 xt_unregister_target(&ip6t_standard_target
);
2372 unregister_pernet_subsys(&ip6_tables_net_ops
);
2377 static void __exit
ip6_tables_fini(void)
2379 nf_unregister_sockopt(&ip6t_sockopts
);
2381 xt_unregister_match(&icmp6_matchstruct
);
2382 xt_unregister_target(&ip6t_error_target
);
2383 xt_unregister_target(&ip6t_standard_target
);
2385 unregister_pernet_subsys(&ip6_tables_net_ops
);
2389 * find the offset to specified header or the protocol number of last header
2390 * if target < 0. "last header" is transport protocol header, ESP, or
2393 * If target header is found, its offset is set in *offset and return protocol
2394 * number. Otherwise, return -1.
2396 * If the first fragment doesn't contain the final protocol header or
2397 * NEXTHDR_NONE it is considered invalid.
2399 * Note that non-1st fragment is special case that "the protocol number
2400 * of last header" is "next header" field in Fragment header. In this case,
2401 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2405 int ipv6_find_hdr(const struct sk_buff
*skb
, unsigned int *offset
,
2406 int target
, unsigned short *fragoff
)
2408 unsigned int start
= skb_network_offset(skb
) + sizeof(struct ipv6hdr
);
2409 u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
2410 unsigned int len
= skb
->len
- start
;
2415 while (nexthdr
!= target
) {
2416 struct ipv6_opt_hdr _hdr
, *hp
;
2417 unsigned int hdrlen
;
2419 if ((!ipv6_ext_hdr(nexthdr
)) || nexthdr
== NEXTHDR_NONE
) {
2425 hp
= skb_header_pointer(skb
, start
, sizeof(_hdr
), &_hdr
);
2428 if (nexthdr
== NEXTHDR_FRAGMENT
) {
2429 unsigned short _frag_off
;
2431 fp
= skb_header_pointer(skb
,
2432 start
+offsetof(struct frag_hdr
,
2439 _frag_off
= ntohs(*fp
) & ~0x7;
2442 ((!ipv6_ext_hdr(hp
->nexthdr
)) ||
2443 hp
->nexthdr
== NEXTHDR_NONE
)) {
2445 *fragoff
= _frag_off
;
2451 } else if (nexthdr
== NEXTHDR_AUTH
)
2452 hdrlen
= (hp
->hdrlen
+ 2) << 2;
2454 hdrlen
= ipv6_optlen(hp
);
2456 nexthdr
= hp
->nexthdr
;
2465 EXPORT_SYMBOL(ip6t_register_table
);
2466 EXPORT_SYMBOL(ip6t_unregister_table
);
2467 EXPORT_SYMBOL(ip6t_do_table
);
2468 EXPORT_SYMBOL(ip6t_ext_hdr
);
2469 EXPORT_SYMBOL(ipv6_find_hdr
);
2471 module_init(ip6_tables_init
);
2472 module_exit(ip6_tables_fini
);