2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr
)
83 return ( (nexthdr
== IPPROTO_HOPOPTS
) ||
84 (nexthdr
== IPPROTO_ROUTING
) ||
85 (nexthdr
== IPPROTO_FRAGMENT
) ||
86 (nexthdr
== IPPROTO_ESP
) ||
87 (nexthdr
== IPPROTO_AH
) ||
88 (nexthdr
== IPPROTO_NONE
) ||
89 (nexthdr
== IPPROTO_DSTOPTS
) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff
*skb
,
98 const struct ip6t_ip6
*ip6info
,
99 unsigned int *protoff
,
100 int *fragoff
, bool *hotdrop
)
103 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
108 &ip6info
->src
), IP6T_INV_SRCIP
) ||
109 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
110 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
122 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
124 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev
, ip6info
->iniface
,
127 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
131 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
133 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev
, ip6info
->outiface
,
136 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info
->flags
& IP6T_F_PROTO
)) {
145 unsigned short _frag_off
;
147 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
);
153 *fragoff
= _frag_off
;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
160 if (ip6info
->proto
== protohdr
) {
161 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info
->proto
!= 0) &&
169 !(ip6info
->invflags
& IP6T_INV_PROTO
))
175 /* should be ip6 safe */
177 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
179 if (ipv6
->flags
& ~IP6T_F_MASK
) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6
->flags
& ~IP6T_F_MASK
);
184 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6
->invflags
& ~IP6T_INV_MASK
);
193 ip6t_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par
->targinfo
);
202 /* Performance critical - called for every packet */
204 do_match(struct ip6t_entry_match
*m
, const struct sk_buff
*skb
,
205 struct xt_match_param
*par
)
207 par
->match
= m
->u
.kernel
.match
;
208 par
->matchinfo
= m
->data
;
210 /* Stop iteration if it doesn't match */
211 if (!m
->u
.kernel
.match
->match(skb
, par
))
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	/* Entries are stored as a packed blob; offset is in bytes from
	 * the start of the table. */
	return (struct ip6t_entry *)(base + offset);
}
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
227 static const struct ip6t_ip6 uncond
;
229 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
232 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234 /* This cries for unification! */
235 static const char *const hooknames
[] = {
236 [NF_INET_PRE_ROUTING
] = "PREROUTING",
237 [NF_INET_LOCAL_IN
] = "INPUT",
238 [NF_INET_FORWARD
] = "FORWARD",
239 [NF_INET_LOCAL_OUT
] = "OUTPUT",
240 [NF_INET_POST_ROUTING
] = "POSTROUTING",
243 enum nf_ip_trace_comments
{
244 NF_IP6_TRACE_COMMENT_RULE
,
245 NF_IP6_TRACE_COMMENT_RETURN
,
246 NF_IP6_TRACE_COMMENT_POLICY
,
249 static const char *const comments
[] = {
250 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
251 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
252 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
255 static struct nf_loginfo trace_loginfo
= {
256 .type
= NF_LOG_TYPE_LOG
,
260 .logflags
= NF_LOG_MASK
,
265 /* Mildly perf critical (only if packet tracing is on) */
267 get_chainname_rulenum(struct ip6t_entry
*s
, struct ip6t_entry
*e
,
268 const char *hookname
, const char **chainname
,
269 const char **comment
, unsigned int *rulenum
)
271 struct ip6t_standard_target
*t
= (void *)ip6t_get_target(s
);
273 if (strcmp(t
->target
.u
.kernel
.target
->name
, IP6T_ERROR_TARGET
) == 0) {
274 /* Head of user chain: ERROR target with chainname */
275 *chainname
= t
->target
.data
;
280 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
281 strcmp(t
->target
.u
.kernel
.target
->name
,
282 IP6T_STANDARD_TARGET
) == 0 &&
284 unconditional(&s
->ipv6
)) {
285 /* Tail of chains: STANDARD target (return/policy) */
286 *comment
= *chainname
== hookname
287 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
288 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
297 static void trace_packet(struct sk_buff
*skb
,
299 const struct net_device
*in
,
300 const struct net_device
*out
,
301 const char *tablename
,
302 struct xt_table_info
*private,
303 struct ip6t_entry
*e
)
306 const struct ip6t_entry
*root
;
307 const char *hookname
, *chainname
, *comment
;
308 unsigned int rulenum
= 0;
310 table_base
= private->entries
[smp_processor_id()];
311 root
= get_entry(table_base
, private->hook_entry
[hook
]);
313 hookname
= chainname
= hooknames
[hook
];
314 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
316 IP6T_ENTRY_ITERATE(root
,
317 private->size
- private->hook_entry
[hook
],
318 get_chainname_rulenum
,
319 e
, hookname
, &chainname
, &comment
, &rulenum
);
321 nf_log_packet(AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
322 "TRACE: %s:%s:%s:%u ",
323 tablename
, chainname
, comment
, rulenum
);
327 static inline __pure
struct ip6t_entry
*
328 ip6t_next_entry(const struct ip6t_entry
*entry
)
330 return (void *)entry
+ entry
->next_offset
;
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
335 ip6t_do_table(struct sk_buff
*skb
,
337 const struct net_device
*in
,
338 const struct net_device
*out
,
339 struct xt_table
*table
)
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
343 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
344 bool hotdrop
= false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict
= NF_DROP
;
347 const char *indev
, *outdev
;
349 struct ip6t_entry
*e
, *back
;
350 struct xt_table_info
*private;
351 struct xt_match_param mtpar
;
352 struct xt_target_param tgpar
;
355 indev
= in
? in
->name
: nulldevname
;
356 outdev
= out
? out
->name
: nulldevname
;
357 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't
363 mtpar
.hotdrop
= &hotdrop
;
364 mtpar
.in
= tgpar
.in
= in
;
365 mtpar
.out
= tgpar
.out
= out
;
366 mtpar
.family
= tgpar
.family
= NFPROTO_IPV6
;
367 mtpar
.hooknum
= tgpar
.hooknum
= hook
;
369 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
372 private = table
->private;
373 table_base
= private->entries
[smp_processor_id()];
375 e
= get_entry(table_base
, private->hook_entry
[hook
]);
377 /* For return from builtin chain */
378 back
= get_entry(table_base
, private->underflow
[hook
]);
381 struct ip6t_entry_target
*t
;
385 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
386 &mtpar
.thoff
, &mtpar
.fragoff
, &hotdrop
) ||
387 IP6T_MATCH_ITERATE(e
, do_match
, skb
, &mtpar
) != 0) {
388 e
= ip6t_next_entry(e
);
392 ADD_COUNTER(e
->counters
,
393 ntohs(ipv6_hdr(skb
)->payload_len
) +
394 sizeof(struct ipv6hdr
), 1);
396 t
= ip6t_get_target(e
);
397 IP_NF_ASSERT(t
->u
.kernel
.target
);
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */
402 if (unlikely(skb
->nf_trace
))
403 trace_packet(skb
, hook
, in
, out
,
404 table
->name
, private, e
);
406 /* Standard target? */
407 if (!t
->u
.kernel
.target
->target
) {
410 v
= ((struct ip6t_standard_target
*)t
)->verdict
;
412 /* Pop from stack? */
413 if (v
!= IP6T_RETURN
) {
414 verdict
= (unsigned)(-v
) - 1;
418 back
= get_entry(table_base
, back
->comefrom
);
421 if (table_base
+ v
!= ip6t_next_entry(e
) &&
422 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
423 /* Save old back ptr in next entry */
424 struct ip6t_entry
*next
= ip6t_next_entry(e
);
425 next
->comefrom
= (void *)back
- table_base
;
426 /* set back pointer to next entry */
430 e
= get_entry(table_base
, v
);
434 /* Targets which reenter must return
436 tgpar
.target
= t
->u
.kernel
.target
;
437 tgpar
.targinfo
= t
->data
;
439 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom
= 0xeeeeeeec;
442 verdict
= t
->u
.kernel
.target
->target(skb
, &tgpar
);
444 #ifdef CONFIG_NETFILTER_DEBUG
445 if (tb_comefrom
!= 0xeeeeeeec && verdict
== IP6T_CONTINUE
) {
446 printk("Target %s reentered!\n",
447 t
->u
.kernel
.target
->name
);
450 tb_comefrom
= 0x57acc001;
452 if (verdict
== IP6T_CONTINUE
)
453 e
= ip6t_next_entry(e
);
459 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom
= NETFILTER_LINK_POISON
;
462 xt_info_rdunlock_bh();
464 #ifdef DEBUG_ALLOW_ALL
475 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */
478 mark_source_chains(struct xt_table_info
*newinfo
,
479 unsigned int valid_hooks
, void *entry0
)
483 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
486 unsigned int pos
= newinfo
->hook_entry
[hook
];
487 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
489 if (!(valid_hooks
& (1 << hook
)))
492 /* Set initial back pointer. */
493 e
->counters
.pcnt
= pos
;
496 struct ip6t_standard_target
*t
497 = (void *)ip6t_get_target(e
);
498 int visited
= e
->comefrom
& (1 << hook
);
500 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
501 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook
, pos
, e
->comefrom
);
505 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
507 /* Unconditional return/END. */
508 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
509 (strcmp(t
->target
.u
.user
.name
,
510 IP6T_STANDARD_TARGET
) == 0) &&
512 unconditional(&e
->ipv6
)) || visited
) {
513 unsigned int oldpos
, size
;
515 if ((strcmp(t
->target
.u
.user
.name
,
516 IP6T_STANDARD_TARGET
) == 0) &&
517 t
->verdict
< -NF_MAX_VERDICT
- 1) {
518 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n",
524 /* Return: backtrack through the last
527 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
528 #ifdef DEBUG_IP_FIREWALL_USER
530 & (1 << NF_INET_NUMHOOKS
)) {
531 duprintf("Back unset "
538 pos
= e
->counters
.pcnt
;
539 e
->counters
.pcnt
= 0;
541 /* We're at the start. */
545 e
= (struct ip6t_entry
*)
547 } while (oldpos
== pos
+ e
->next_offset
);
550 size
= e
->next_offset
;
551 e
= (struct ip6t_entry
*)
552 (entry0
+ pos
+ size
);
553 e
->counters
.pcnt
= pos
;
556 int newpos
= t
->verdict
;
558 if (strcmp(t
->target
.u
.user
.name
,
559 IP6T_STANDARD_TARGET
) == 0 &&
561 if (newpos
> newinfo
->size
-
562 sizeof(struct ip6t_entry
)) {
563 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n",
568 /* This a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n",
572 /* ... this is a fallthru */
573 newpos
= pos
+ e
->next_offset
;
575 e
= (struct ip6t_entry
*)
577 e
->counters
.pcnt
= pos
;
582 duprintf("Finished chain %u\n", hook
);
588 cleanup_match(struct ip6t_entry_match
*m
, struct net
*net
, unsigned int *i
)
590 struct xt_mtdtor_param par
;
592 if (i
&& (*i
)-- == 0)
596 par
.match
= m
->u
.kernel
.match
;
597 par
.matchinfo
= m
->data
;
598 par
.family
= NFPROTO_IPV6
;
599 if (par
.match
->destroy
!= NULL
)
600 par
.match
->destroy(&par
);
601 module_put(par
.match
->me
);
606 check_entry(struct ip6t_entry
*e
, const char *name
)
608 struct ip6t_entry_target
*t
;
610 if (!ip6_checkentry(&e
->ipv6
)) {
611 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
615 if (e
->target_offset
+ sizeof(struct ip6t_entry_target
) >
619 t
= ip6t_get_target(e
);
620 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
626 static int check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
629 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
632 par
->match
= m
->u
.kernel
.match
;
633 par
->matchinfo
= m
->data
;
635 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
636 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
638 duprintf("ip_tables: check failed for `%s'.\n",
647 find_check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
650 struct xt_match
*match
;
653 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
655 "ip6t_%s", m
->u
.user
.name
);
656 if (IS_ERR(match
) || !match
) {
657 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
658 return match
? PTR_ERR(match
) : -ENOENT
;
660 m
->u
.kernel
.match
= match
;
662 ret
= check_match(m
, par
, i
);
668 module_put(m
->u
.kernel
.match
->me
);
672 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
674 struct ip6t_entry_target
*t
= ip6t_get_target(e
);
675 struct xt_tgchk_param par
= {
679 .target
= t
->u
.kernel
.target
,
681 .hook_mask
= e
->comefrom
,
682 .family
= NFPROTO_IPV6
,
686 t
= ip6t_get_target(e
);
687 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
688 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
690 duprintf("ip_tables: check failed for `%s'.\n",
691 t
->u
.kernel
.target
->name
);
698 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
699 unsigned int size
, unsigned int *i
)
701 struct ip6t_entry_target
*t
;
702 struct xt_target
*target
;
705 struct xt_mtchk_param mtpar
;
707 ret
= check_entry(e
, name
);
714 mtpar
.entryinfo
= &e
->ipv6
;
715 mtpar
.hook_mask
= e
->comefrom
;
716 mtpar
.family
= NFPROTO_IPV6
;
717 ret
= IP6T_MATCH_ITERATE(e
, find_check_match
, &mtpar
, &j
);
719 goto cleanup_matches
;
721 t
= ip6t_get_target(e
);
722 target
= try_then_request_module(xt_find_target(AF_INET6
,
725 "ip6t_%s", t
->u
.user
.name
);
726 if (IS_ERR(target
) || !target
) {
727 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
728 ret
= target
? PTR_ERR(target
) : -ENOENT
;
729 goto cleanup_matches
;
731 t
->u
.kernel
.target
= target
;
733 ret
= check_target(e
, net
, name
);
740 module_put(t
->u
.kernel
.target
->me
);
742 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, &j
);
746 static bool check_underflow(struct ip6t_entry
*e
)
748 const struct ip6t_entry_target
*t
;
749 unsigned int verdict
;
751 if (!unconditional(&e
->ipv6
))
753 t
= ip6t_get_target(e
);
754 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
756 verdict
= ((struct ip6t_standard_target
*)t
)->verdict
;
757 verdict
= -verdict
- 1;
758 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
762 check_entry_size_and_hooks(struct ip6t_entry
*e
,
763 struct xt_table_info
*newinfo
,
765 unsigned char *limit
,
766 const unsigned int *hook_entries
,
767 const unsigned int *underflows
,
768 unsigned int valid_hooks
,
773 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
774 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
775 duprintf("Bad offset %p\n", e
);
780 < sizeof(struct ip6t_entry
) + sizeof(struct ip6t_entry_target
)) {
781 duprintf("checking: element %p size %u\n",
786 /* Check hooks & underflows */
787 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
788 if (!(valid_hooks
& (1 << h
)))
790 if ((unsigned char *)e
- base
== hook_entries
[h
])
791 newinfo
->hook_entry
[h
] = hook_entries
[h
];
792 if ((unsigned char *)e
- base
== underflows
[h
]) {
793 if (!check_underflow(e
)) {
794 pr_err("Underflows must be unconditional and "
795 "use the STANDARD target with "
799 newinfo
->underflow
[h
] = underflows
[h
];
803 /* Clear counters and comefrom */
804 e
->counters
= ((struct xt_counters
) { 0, 0 });
812 cleanup_entry(struct ip6t_entry
*e
, struct net
*net
, unsigned int *i
)
814 struct xt_tgdtor_param par
;
815 struct ip6t_entry_target
*t
;
817 if (i
&& (*i
)-- == 0)
820 /* Cleanup all matches */
821 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, NULL
);
822 t
= ip6t_get_target(e
);
825 par
.target
= t
->u
.kernel
.target
;
826 par
.targinfo
= t
->data
;
827 par
.family
= NFPROTO_IPV6
;
828 if (par
.target
->destroy
!= NULL
)
829 par
.target
->destroy(&par
);
830 module_put(par
.target
->me
);
834 /* Checks and translates the user-supplied table segment (held in
837 translate_table(struct net
*net
,
839 unsigned int valid_hooks
,
840 struct xt_table_info
*newinfo
,
844 const unsigned int *hook_entries
,
845 const unsigned int *underflows
)
850 newinfo
->size
= size
;
851 newinfo
->number
= number
;
853 /* Init all hooks to impossible value. */
854 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
855 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
856 newinfo
->underflow
[i
] = 0xFFFFFFFF;
859 duprintf("translate_table: size %u\n", newinfo
->size
);
861 /* Walk through entries, checking offsets. */
862 ret
= IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
863 check_entry_size_and_hooks
,
867 hook_entries
, underflows
, valid_hooks
, &i
);
872 duprintf("translate_table: %u not %u entries\n",
877 /* Check hooks all assigned */
878 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
879 /* Only hooks which are valid */
880 if (!(valid_hooks
& (1 << i
)))
882 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
883 duprintf("Invalid hook entry %u %u\n",
887 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
888 duprintf("Invalid underflow %u %u\n",
894 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
897 /* Finally, each sanity check must pass */
899 ret
= IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
900 find_check_entry
, net
, name
, size
, &i
);
903 IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
904 cleanup_entry
, net
, &i
);
908 /* And one copy for every other CPU */
909 for_each_possible_cpu(i
) {
910 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
911 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
919 add_entry_to_counter(const struct ip6t_entry
*e
,
920 struct xt_counters total
[],
923 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
930 set_entry_to_counter(const struct ip6t_entry
*e
,
931 struct ip6t_counters total
[],
934 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
941 get_counters(const struct xt_table_info
*t
,
942 struct xt_counters counters
[])
948 /* Instead of clearing (by a previous call to memset())
949 * the counters and using adds, we set the counters
950 * with data used by 'current' CPU
952 * Bottom half has to be disabled to prevent deadlock
953 * if new softirq were to run and call ipt_do_table
956 curcpu
= smp_processor_id();
959 IP6T_ENTRY_ITERATE(t
->entries
[curcpu
],
961 set_entry_to_counter
,
965 for_each_possible_cpu(cpu
) {
970 IP6T_ENTRY_ITERATE(t
->entries
[cpu
],
972 add_entry_to_counter
,
975 xt_info_wrunlock(cpu
);
980 static struct xt_counters
*alloc_counters(struct xt_table
*table
)
982 unsigned int countersize
;
983 struct xt_counters
*counters
;
984 struct xt_table_info
*private = table
->private;
986 /* We need atomic snapshot of counters: rest doesn't change
987 (other than comefrom, which userspace doesn't care
989 countersize
= sizeof(struct xt_counters
) * private->number
;
990 counters
= vmalloc_node(countersize
, numa_node_id());
992 if (counters
== NULL
)
993 return ERR_PTR(-ENOMEM
);
995 get_counters(private, counters
);
1001 copy_entries_to_user(unsigned int total_size
,
1002 struct xt_table
*table
,
1003 void __user
*userptr
)
1005 unsigned int off
, num
;
1006 struct ip6t_entry
*e
;
1007 struct xt_counters
*counters
;
1008 const struct xt_table_info
*private = table
->private;
1010 const void *loc_cpu_entry
;
1012 counters
= alloc_counters(table
);
1013 if (IS_ERR(counters
))
1014 return PTR_ERR(counters
);
1016 /* choose the copy that is on our node/cpu, ...
1017 * This choice is lazy (because current thread is
1018 * allowed to migrate to another cpu)
1020 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1021 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
1026 /* FIXME: use iterator macros --RR */
1027 /* ... then go back and fix counters and names */
1028 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
1030 const struct ip6t_entry_match
*m
;
1031 const struct ip6t_entry_target
*t
;
1033 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
1034 if (copy_to_user(userptr
+ off
1035 + offsetof(struct ip6t_entry
, counters
),
1037 sizeof(counters
[num
])) != 0) {
1042 for (i
= sizeof(struct ip6t_entry
);
1043 i
< e
->target_offset
;
1044 i
+= m
->u
.match_size
) {
1047 if (copy_to_user(userptr
+ off
+ i
1048 + offsetof(struct ip6t_entry_match
,
1050 m
->u
.kernel
.match
->name
,
1051 strlen(m
->u
.kernel
.match
->name
)+1)
1058 t
= ip6t_get_target(e
);
1059 if (copy_to_user(userptr
+ off
+ e
->target_offset
1060 + offsetof(struct ip6t_entry_target
,
1062 t
->u
.kernel
.target
->name
,
1063 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1074 #ifdef CONFIG_COMPAT
1075 static void compat_standard_from_user(void *dst
, void *src
)
1077 int v
= *(compat_int_t
*)src
;
1080 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1081 memcpy(dst
, &v
, sizeof(v
));
1084 static int compat_standard_to_user(void __user
*dst
, void *src
)
1086 compat_int_t cv
= *(int *)src
;
1089 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1090 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1094 compat_calc_match(struct ip6t_entry_match
*m
, int *size
)
1096 *size
+= xt_compat_match_offset(m
->u
.kernel
.match
);
1100 static int compat_calc_entry(struct ip6t_entry
*e
,
1101 const struct xt_table_info
*info
,
1102 void *base
, struct xt_table_info
*newinfo
)
1104 struct ip6t_entry_target
*t
;
1105 unsigned int entry_offset
;
1108 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1109 entry_offset
= (void *)e
- base
;
1110 IP6T_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1111 t
= ip6t_get_target(e
);
1112 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1113 newinfo
->size
-= off
;
1114 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1118 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1119 if (info
->hook_entry
[i
] &&
1120 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1121 newinfo
->hook_entry
[i
] -= off
;
1122 if (info
->underflow
[i
] &&
1123 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1124 newinfo
->underflow
[i
] -= off
;
1129 static int compat_table_info(const struct xt_table_info
*info
,
1130 struct xt_table_info
*newinfo
)
1132 void *loc_cpu_entry
;
1134 if (!newinfo
|| !info
)
1137 /* we dont care about newinfo->entries[] */
1138 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1139 newinfo
->initial_entries
= 0;
1140 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1141 return IP6T_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
1142 compat_calc_entry
, info
, loc_cpu_entry
,
1147 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
1149 char name
[IP6T_TABLE_MAXNAMELEN
];
1153 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1154 duprintf("length %u != %zu\n", *len
,
1155 sizeof(struct ip6t_getinfo
));
1159 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1162 name
[IP6T_TABLE_MAXNAMELEN
-1] = '\0';
1163 #ifdef CONFIG_COMPAT
1165 xt_compat_lock(AF_INET6
);
1167 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1168 "ip6table_%s", name
);
1169 if (t
&& !IS_ERR(t
)) {
1170 struct ip6t_getinfo info
;
1171 const struct xt_table_info
*private = t
->private;
1172 #ifdef CONFIG_COMPAT
1173 struct xt_table_info tmp
;
1176 ret
= compat_table_info(private, &tmp
);
1177 xt_compat_flush_offsets(AF_INET6
);
1181 info
.valid_hooks
= t
->valid_hooks
;
1182 memcpy(info
.hook_entry
, private->hook_entry
,
1183 sizeof(info
.hook_entry
));
1184 memcpy(info
.underflow
, private->underflow
,
1185 sizeof(info
.underflow
));
1186 info
.num_entries
= private->number
;
1187 info
.size
= private->size
;
1188 strcpy(info
.name
, name
);
1190 if (copy_to_user(user
, &info
, *len
) != 0)
1198 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1199 #ifdef CONFIG_COMPAT
1201 xt_compat_unlock(AF_INET6
);
1207 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
, int *len
)
1210 struct ip6t_get_entries get
;
1213 if (*len
< sizeof(get
)) {
1214 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1217 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1219 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1220 duprintf("get_entries: %u != %zu\n",
1221 *len
, sizeof(get
) + get
.size
);
1225 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1226 if (t
&& !IS_ERR(t
)) {
1227 struct xt_table_info
*private = t
->private;
1228 duprintf("t->private->number = %u\n", private->number
);
1229 if (get
.size
== private->size
)
1230 ret
= copy_entries_to_user(private->size
,
1231 t
, uptr
->entrytable
);
1233 duprintf("get_entries: I've got %u not %u!\n",
1234 private->size
, get
.size
);
1240 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1246 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1247 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1248 void __user
*counters_ptr
)
1252 struct xt_table_info
*oldinfo
;
1253 struct xt_counters
*counters
;
1254 const void *loc_cpu_old_entry
;
1257 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
1264 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1265 "ip6table_%s", name
);
1266 if (!t
|| IS_ERR(t
)) {
1267 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1268 goto free_newinfo_counters_untrans
;
1272 if (valid_hooks
!= t
->valid_hooks
) {
1273 duprintf("Valid hook crap: %08X vs %08X\n",
1274 valid_hooks
, t
->valid_hooks
);
1279 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1283 /* Update module usage count based on number of rules */
1284 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1285 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1286 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1287 (newinfo
->number
<= oldinfo
->initial_entries
))
1289 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1290 (newinfo
->number
<= oldinfo
->initial_entries
))
1293 /* Get the old counters, and synchronize with replace */
1294 get_counters(oldinfo
, counters
);
1296 /* Decrease module usage counts and free resource */
1297 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1298 IP6T_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1300 xt_free_table_info(oldinfo
);
1301 if (copy_to_user(counters_ptr
, counters
,
1302 sizeof(struct xt_counters
) * num_counters
) != 0)
1311 free_newinfo_counters_untrans
:
1318 do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1321 struct ip6t_replace tmp
;
1322 struct xt_table_info
*newinfo
;
1323 void *loc_cpu_entry
;
1325 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1328 /* overflow check */
1329 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1332 newinfo
= xt_alloc_table_info(tmp
.size
);
1336 /* choose the copy that is on our node/cpu */
1337 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1338 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1344 ret
= translate_table(net
, tmp
.name
, tmp
.valid_hooks
,
1345 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1346 tmp
.hook_entry
, tmp
.underflow
);
1350 duprintf("ip_tables: Translated table\n");
1352 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1353 tmp
.num_counters
, tmp
.counters
);
1355 goto free_newinfo_untrans
;
1358 free_newinfo_untrans
:
1359 IP6T_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, net
, NULL
);
1361 xt_free_table_info(newinfo
);
1365 /* We're lazy, and add to the first CPU; overflow works its fey magic
1366 * and everything is OK. */
1368 add_counter_to_entry(struct ip6t_entry
*e
,
1369 const struct xt_counters addme
[],
1372 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1379 do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
,
1382 unsigned int i
, curcpu
;
1383 struct xt_counters_info tmp
;
1384 struct xt_counters
*paddc
;
1385 unsigned int num_counters
;
1390 const struct xt_table_info
*private;
1392 const void *loc_cpu_entry
;
1393 #ifdef CONFIG_COMPAT
1394 struct compat_xt_counters_info compat_tmp
;
1398 size
= sizeof(struct compat_xt_counters_info
);
1403 size
= sizeof(struct xt_counters_info
);
1406 if (copy_from_user(ptmp
, user
, size
) != 0)
1409 #ifdef CONFIG_COMPAT
1411 num_counters
= compat_tmp
.num_counters
;
1412 name
= compat_tmp
.name
;
1416 num_counters
= tmp
.num_counters
;
1420 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1423 paddc
= vmalloc_node(len
- size
, numa_node_id());
1427 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1432 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1433 if (!t
|| IS_ERR(t
)) {
1434 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1440 private = t
->private;
1441 if (private->number
!= num_counters
) {
1443 goto unlock_up_free
;
1447 /* Choose the copy that is on our node */
1448 curcpu
= smp_processor_id();
1449 xt_info_wrlock(curcpu
);
1450 loc_cpu_entry
= private->entries
[curcpu
];
1451 IP6T_ENTRY_ITERATE(loc_cpu_entry
,
1453 add_counter_to_entry
,
1456 xt_info_wrunlock(curcpu
);
1468 #ifdef CONFIG_COMPAT
1469 struct compat_ip6t_replace
{
1470 char name
[IP6T_TABLE_MAXNAMELEN
];
1474 u32 hook_entry
[NF_INET_NUMHOOKS
];
1475 u32 underflow
[NF_INET_NUMHOOKS
];
1477 compat_uptr_t counters
; /* struct ip6t_counters * */
1478 struct compat_ip6t_entry entries
[0];
1482 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1483 unsigned int *size
, struct xt_counters
*counters
,
1486 struct ip6t_entry_target
*t
;
1487 struct compat_ip6t_entry __user
*ce
;
1488 u_int16_t target_offset
, next_offset
;
1489 compat_uint_t origsize
;
1494 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1495 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)))
1498 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
1501 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1502 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1504 ret
= IP6T_MATCH_ITERATE(e
, xt_compat_match_to_user
, dstptr
, size
);
1505 target_offset
= e
->target_offset
- (origsize
- *size
);
1508 t
= ip6t_get_target(e
);
1509 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1513 next_offset
= e
->next_offset
- (origsize
- *size
);
1514 if (put_user(target_offset
, &ce
->target_offset
))
1516 if (put_user(next_offset
, &ce
->next_offset
))
1526 compat_find_calc_match(struct ip6t_entry_match
*m
,
1528 const struct ip6t_ip6
*ipv6
,
1529 unsigned int hookmask
,
1530 int *size
, unsigned int *i
)
1532 struct xt_match
*match
;
1534 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
1535 m
->u
.user
.revision
),
1536 "ip6t_%s", m
->u
.user
.name
);
1537 if (IS_ERR(match
) || !match
) {
1538 duprintf("compat_check_calc_match: `%s' not found\n",
1540 return match
? PTR_ERR(match
) : -ENOENT
;
1542 m
->u
.kernel
.match
= match
;
1543 *size
+= xt_compat_match_offset(match
);
1550 compat_release_match(struct ip6t_entry_match
*m
, unsigned int *i
)
1552 if (i
&& (*i
)-- == 0)
1555 module_put(m
->u
.kernel
.match
->me
);
1560 compat_release_entry(struct compat_ip6t_entry
*e
, unsigned int *i
)
1562 struct ip6t_entry_target
*t
;
1564 if (i
&& (*i
)-- == 0)
1567 /* Cleanup all matches */
1568 COMPAT_IP6T_MATCH_ITERATE(e
, compat_release_match
, NULL
);
1569 t
= compat_ip6t_get_target(e
);
1570 module_put(t
->u
.kernel
.target
->me
);
1575 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1576 struct xt_table_info
*newinfo
,
1578 unsigned char *base
,
1579 unsigned char *limit
,
1580 unsigned int *hook_entries
,
1581 unsigned int *underflows
,
1585 struct ip6t_entry_target
*t
;
1586 struct xt_target
*target
;
1587 unsigned int entry_offset
;
1591 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1592 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1593 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1594 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1598 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1599 sizeof(struct compat_xt_entry_target
)) {
1600 duprintf("checking: element %p size %u\n",
1605 /* For purposes of check_entry casting the compat entry is fine */
1606 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1610 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1611 entry_offset
= (void *)e
- (void *)base
;
1613 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, compat_find_calc_match
, name
,
1614 &e
->ipv6
, e
->comefrom
, &off
, &j
);
1616 goto release_matches
;
1618 t
= compat_ip6t_get_target(e
);
1619 target
= try_then_request_module(xt_find_target(AF_INET6
,
1621 t
->u
.user
.revision
),
1622 "ip6t_%s", t
->u
.user
.name
);
1623 if (IS_ERR(target
) || !target
) {
1624 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1626 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1627 goto release_matches
;
1629 t
->u
.kernel
.target
= target
;
1631 off
+= xt_compat_target_offset(target
);
1633 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1637 /* Check hooks & underflows */
1638 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1639 if ((unsigned char *)e
- base
== hook_entries
[h
])
1640 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1641 if ((unsigned char *)e
- base
== underflows
[h
])
1642 newinfo
->underflow
[h
] = underflows
[h
];
1645 /* Clear counters and comefrom */
1646 memset(&e
->counters
, 0, sizeof(e
->counters
));
1653 module_put(t
->u
.kernel
.target
->me
);
1655 IP6T_MATCH_ITERATE(e
, compat_release_match
, &j
);
1660 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1661 unsigned int *size
, const char *name
,
1662 struct xt_table_info
*newinfo
, unsigned char *base
)
1664 struct ip6t_entry_target
*t
;
1665 struct xt_target
*target
;
1666 struct ip6t_entry
*de
;
1667 unsigned int origsize
;
1672 de
= (struct ip6t_entry
*)*dstptr
;
1673 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1674 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1676 *dstptr
+= sizeof(struct ip6t_entry
);
1677 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1679 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, xt_compat_match_from_user
,
1683 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1684 t
= compat_ip6t_get_target(e
);
1685 target
= t
->u
.kernel
.target
;
1686 xt_compat_target_from_user(t
, dstptr
, size
);
1688 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1689 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1690 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1691 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1692 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1693 newinfo
->underflow
[h
] -= origsize
- *size
;
1698 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1699 const char *name
, unsigned int *i
)
1703 struct xt_mtchk_param mtpar
;
1708 mtpar
.entryinfo
= &e
->ipv6
;
1709 mtpar
.hook_mask
= e
->comefrom
;
1710 mtpar
.family
= NFPROTO_IPV6
;
1711 ret
= IP6T_MATCH_ITERATE(e
, check_match
, &mtpar
, &j
);
1713 goto cleanup_matches
;
1715 ret
= check_target(e
, net
, name
);
1717 goto cleanup_matches
;
1723 IP6T_MATCH_ITERATE(e
, cleanup_match
, net
, &j
);
1728 translate_compat_table(struct net
*net
,
1730 unsigned int valid_hooks
,
1731 struct xt_table_info
**pinfo
,
1733 unsigned int total_size
,
1734 unsigned int number
,
1735 unsigned int *hook_entries
,
1736 unsigned int *underflows
)
1739 struct xt_table_info
*newinfo
, *info
;
1740 void *pos
, *entry0
, *entry1
;
1747 info
->number
= number
;
1749 /* Init all hooks to impossible value. */
1750 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1751 info
->hook_entry
[i
] = 0xFFFFFFFF;
1752 info
->underflow
[i
] = 0xFFFFFFFF;
1755 duprintf("translate_compat_table: size %u\n", info
->size
);
1757 xt_compat_lock(AF_INET6
);
1758 /* Walk through entries, checking offsets. */
1759 ret
= COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
,
1760 check_compat_entry_size_and_hooks
,
1761 info
, &size
, entry0
,
1762 entry0
+ total_size
,
1763 hook_entries
, underflows
, &j
, name
);
1769 duprintf("translate_compat_table: %u not %u entries\n",
1774 /* Check hooks all assigned */
1775 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1776 /* Only hooks which are valid */
1777 if (!(valid_hooks
& (1 << i
)))
1779 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1780 duprintf("Invalid hook entry %u %u\n",
1781 i
, hook_entries
[i
]);
1784 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1785 duprintf("Invalid underflow %u %u\n",
1792 newinfo
= xt_alloc_table_info(size
);
1796 newinfo
->number
= number
;
1797 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1798 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1799 newinfo
->underflow
[i
] = info
->underflow
[i
];
1801 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1804 ret
= COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
,
1805 compat_copy_entry_from_user
,
1806 &pos
, &size
, name
, newinfo
, entry1
);
1807 xt_compat_flush_offsets(AF_INET6
);
1808 xt_compat_unlock(AF_INET6
);
1813 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1817 ret
= IP6T_ENTRY_ITERATE(entry1
, newinfo
->size
, compat_check_entry
,
1821 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0
, newinfo
->size
, i
,
1822 compat_release_entry
, &j
);
1823 IP6T_ENTRY_ITERATE(entry1
, newinfo
->size
, cleanup_entry
, net
, &i
);
1824 xt_free_table_info(newinfo
);
1828 /* And one copy for every other CPU */
1829 for_each_possible_cpu(i
)
1830 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1831 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1835 xt_free_table_info(info
);
1839 xt_free_table_info(newinfo
);
1841 COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
, compat_release_entry
, &j
);
1844 xt_compat_flush_offsets(AF_INET6
);
1845 xt_compat_unlock(AF_INET6
);
1850 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1853 struct compat_ip6t_replace tmp
;
1854 struct xt_table_info
*newinfo
;
1855 void *loc_cpu_entry
;
1857 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1860 /* overflow check */
1861 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1863 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1866 newinfo
= xt_alloc_table_info(tmp
.size
);
1870 /* choose the copy that is on our node/cpu */
1871 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1872 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1878 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1879 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1880 tmp
.num_entries
, tmp
.hook_entry
,
1885 duprintf("compat_do_replace: Translated table\n");
1887 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1888 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1890 goto free_newinfo_untrans
;
1893 free_newinfo_untrans
:
1894 IP6T_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, net
, NULL
);
1896 xt_free_table_info(newinfo
);
1901 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1906 if (!capable(CAP_NET_ADMIN
))
1910 case IP6T_SO_SET_REPLACE
:
1911 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1914 case IP6T_SO_SET_ADD_COUNTERS
:
1915 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1919 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1926 struct compat_ip6t_get_entries
{
1927 char name
[IP6T_TABLE_MAXNAMELEN
];
1929 struct compat_ip6t_entry entrytable
[0];
1933 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1934 void __user
*userptr
)
1936 struct xt_counters
*counters
;
1937 const struct xt_table_info
*private = table
->private;
1941 const void *loc_cpu_entry
;
1944 counters
= alloc_counters(table
);
1945 if (IS_ERR(counters
))
1946 return PTR_ERR(counters
);
1948 /* choose the copy that is on our node/cpu, ...
1949 * This choice is lazy (because current thread is
1950 * allowed to migrate to another cpu)
1952 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1955 ret
= IP6T_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1956 compat_copy_entry_to_user
,
1957 &pos
, &size
, counters
, &i
);
1964 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1968 struct compat_ip6t_get_entries get
;
1971 if (*len
< sizeof(get
)) {
1972 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1976 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1979 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1980 duprintf("compat_get_entries: %u != %zu\n",
1981 *len
, sizeof(get
) + get
.size
);
1985 xt_compat_lock(AF_INET6
);
1986 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1987 if (t
&& !IS_ERR(t
)) {
1988 const struct xt_table_info
*private = t
->private;
1989 struct xt_table_info info
;
1990 duprintf("t->private->number = %u\n", private->number
);
1991 ret
= compat_table_info(private, &info
);
1992 if (!ret
&& get
.size
== info
.size
) {
1993 ret
= compat_copy_entries_to_user(private->size
,
1994 t
, uptr
->entrytable
);
1996 duprintf("compat_get_entries: I've got %u not %u!\n",
1997 private->size
, get
.size
);
2000 xt_compat_flush_offsets(AF_INET6
);
2004 ret
= t
? PTR_ERR(t
) : -ENOENT
;
2006 xt_compat_unlock(AF_INET6
);
2010 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
2013 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2017 if (!capable(CAP_NET_ADMIN
))
2021 case IP6T_SO_GET_INFO
:
2022 ret
= get_info(sock_net(sk
), user
, len
, 1);
2024 case IP6T_SO_GET_ENTRIES
:
2025 ret
= compat_get_entries(sock_net(sk
), user
, len
);
2028 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2035 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2039 if (!capable(CAP_NET_ADMIN
))
2043 case IP6T_SO_SET_REPLACE
:
2044 ret
= do_replace(sock_net(sk
), user
, len
);
2047 case IP6T_SO_SET_ADD_COUNTERS
:
2048 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2052 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2060 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2064 if (!capable(CAP_NET_ADMIN
))
2068 case IP6T_SO_GET_INFO
:
2069 ret
= get_info(sock_net(sk
), user
, len
, 0);
2072 case IP6T_SO_GET_ENTRIES
:
2073 ret
= get_entries(sock_net(sk
), user
, len
);
2076 case IP6T_SO_GET_REVISION_MATCH
:
2077 case IP6T_SO_GET_REVISION_TARGET
: {
2078 struct ip6t_get_revision rev
;
2081 if (*len
!= sizeof(rev
)) {
2085 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2090 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2095 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2098 "ip6t_%s", rev
.name
);
2103 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2110 struct xt_table
*ip6t_register_table(struct net
*net
,
2111 const struct xt_table
*table
,
2112 const struct ip6t_replace
*repl
)
2115 struct xt_table_info
*newinfo
;
2116 struct xt_table_info bootstrap
2117 = { 0, 0, 0, { 0 }, { 0 }, { } };
2118 void *loc_cpu_entry
;
2119 struct xt_table
*new_table
;
2121 newinfo
= xt_alloc_table_info(repl
->size
);
2127 /* choose the copy on our node/cpu, but dont care about preemption */
2128 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2129 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2131 ret
= translate_table(net
, table
->name
, table
->valid_hooks
,
2132 newinfo
, loc_cpu_entry
, repl
->size
,
2139 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2140 if (IS_ERR(new_table
)) {
2141 ret
= PTR_ERR(new_table
);
2147 xt_free_table_info(newinfo
);
2149 return ERR_PTR(ret
);
2152 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2154 struct xt_table_info
*private;
2155 void *loc_cpu_entry
;
2156 struct module
*table_owner
= table
->me
;
2158 private = xt_unregister_table(table
);
2160 /* Decrease module usage counts and free resources */
2161 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2162 IP6T_ENTRY_ITERATE(loc_cpu_entry
, private->size
, cleanup_entry
, net
, NULL
);
2163 if (private->number
> private->initial_entries
)
2164 module_put(table_owner
);
2165 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	/* XOR with invert flips the sense of the match. */
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
2179 icmp6_match(const struct sk_buff
*skb
, const struct xt_match_param
*par
)
2181 const struct icmp6hdr
*ic
;
2182 struct icmp6hdr _icmph
;
2183 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2185 /* Must not be a fragment. */
2186 if (par
->fragoff
!= 0)
2189 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2191 /* We've been asked to examine this packet, and we
2192 * can't. Hence, no choice but to drop.
2194 duprintf("Dropping evil ICMP tinygram.\n");
2195 *par
->hotdrop
= true;
2199 return icmp6_type_code_match(icmpinfo
->type
,
2202 ic
->icmp6_type
, ic
->icmp6_code
,
2203 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2206 /* Called when user tries to insert an entry of this type. */
2207 static bool icmp6_checkentry(const struct xt_mtchk_param
*par
)
2209 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2211 /* Must specify no unknown invflags */
2212 return !(icmpinfo
->invflags
& ~IP6T_ICMP_INV
);
2215 /* The built-in targets: standard (NULL) and error. */
2216 static struct xt_target ip6t_standard_target __read_mostly
= {
2217 .name
= IP6T_STANDARD_TARGET
,
2218 .targetsize
= sizeof(int),
2219 .family
= NFPROTO_IPV6
,
2220 #ifdef CONFIG_COMPAT
2221 .compatsize
= sizeof(compat_int_t
),
2222 .compat_from_user
= compat_standard_from_user
,
2223 .compat_to_user
= compat_standard_to_user
,
2227 static struct xt_target ip6t_error_target __read_mostly
= {
2228 .name
= IP6T_ERROR_TARGET
,
2229 .target
= ip6t_error
,
2230 .targetsize
= IP6T_FUNCTION_MAXNAMELEN
,
2231 .family
= NFPROTO_IPV6
,
2234 static struct nf_sockopt_ops ip6t_sockopts
= {
2236 .set_optmin
= IP6T_BASE_CTL
,
2237 .set_optmax
= IP6T_SO_SET_MAX
+1,
2238 .set
= do_ip6t_set_ctl
,
2239 #ifdef CONFIG_COMPAT
2240 .compat_set
= compat_do_ip6t_set_ctl
,
2242 .get_optmin
= IP6T_BASE_CTL
,
2243 .get_optmax
= IP6T_SO_GET_MAX
+1,
2244 .get
= do_ip6t_get_ctl
,
2245 #ifdef CONFIG_COMPAT
2246 .compat_get
= compat_do_ip6t_get_ctl
,
2248 .owner
= THIS_MODULE
,
2251 static struct xt_match icmp6_matchstruct __read_mostly
= {
2253 .match
= icmp6_match
,
2254 .matchsize
= sizeof(struct ip6t_icmp
),
2255 .checkentry
= icmp6_checkentry
,
2256 .proto
= IPPROTO_ICMPV6
,
2257 .family
= NFPROTO_IPV6
,
2260 static int __net_init
ip6_tables_net_init(struct net
*net
)
2262 return xt_proto_init(net
, NFPROTO_IPV6
);
2265 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2267 xt_proto_fini(net
, NFPROTO_IPV6
);
2270 static struct pernet_operations ip6_tables_net_ops
= {
2271 .init
= ip6_tables_net_init
,
2272 .exit
= ip6_tables_net_exit
,
2275 static int __init
ip6_tables_init(void)
2279 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2283 /* Noone else will be downing sem now, so we won't sleep */
2284 ret
= xt_register_target(&ip6t_standard_target
);
2287 ret
= xt_register_target(&ip6t_error_target
);
2290 ret
= xt_register_match(&icmp6_matchstruct
);
2294 /* Register setsockopt */
2295 ret
= nf_register_sockopt(&ip6t_sockopts
);
2299 printk(KERN_INFO
"ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2303 xt_unregister_match(&icmp6_matchstruct
);
2305 xt_unregister_target(&ip6t_error_target
);
2307 xt_unregister_target(&ip6t_standard_target
);
2309 unregister_pernet_subsys(&ip6_tables_net_ops
);
2314 static void __exit
ip6_tables_fini(void)
2316 nf_unregister_sockopt(&ip6t_sockopts
);
2318 xt_unregister_match(&icmp6_matchstruct
);
2319 xt_unregister_target(&ip6t_error_target
);
2320 xt_unregister_target(&ip6t_standard_target
);
2322 unregister_pernet_subsys(&ip6_tables_net_ops
);
2326 * find the offset to specified header or the protocol number of last header
2327 * if target < 0. "last header" is transport protocol header, ESP, or
2330 * If target header is found, its offset is set in *offset and return protocol
2331 * number. Otherwise, return -1.
2333 * If the first fragment doesn't contain the final protocol header or
2334 * NEXTHDR_NONE it is considered invalid.
2336 * Note that non-1st fragment is special case that "the protocol number
2337 * of last header" is "next header" field in Fragment header. In this case,
2338 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2342 int ipv6_find_hdr(const struct sk_buff
*skb
, unsigned int *offset
,
2343 int target
, unsigned short *fragoff
)
2345 unsigned int start
= skb_network_offset(skb
) + sizeof(struct ipv6hdr
);
2346 u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
2347 unsigned int len
= skb
->len
- start
;
2352 while (nexthdr
!= target
) {
2353 struct ipv6_opt_hdr _hdr
, *hp
;
2354 unsigned int hdrlen
;
2356 if ((!ipv6_ext_hdr(nexthdr
)) || nexthdr
== NEXTHDR_NONE
) {
2362 hp
= skb_header_pointer(skb
, start
, sizeof(_hdr
), &_hdr
);
2365 if (nexthdr
== NEXTHDR_FRAGMENT
) {
2366 unsigned short _frag_off
;
2368 fp
= skb_header_pointer(skb
,
2369 start
+offsetof(struct frag_hdr
,
2376 _frag_off
= ntohs(*fp
) & ~0x7;
2379 ((!ipv6_ext_hdr(hp
->nexthdr
)) ||
2380 hp
->nexthdr
== NEXTHDR_NONE
)) {
2382 *fragoff
= _frag_off
;
2388 } else if (nexthdr
== NEXTHDR_AUTH
)
2389 hdrlen
= (hp
->hdrlen
+ 2) << 2;
2391 hdrlen
= ipv6_optlen(hp
);
2393 nexthdr
= hp
->nexthdr
;
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);