2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: datapath debug output; compiled out unless DEBUG_IP_FIREWALL. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf: user/config-path debug output; compiled out by default. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT: warn (not BUG) on broken invariants when debugging. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
61 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
63 return xt_alloc_initial_table(ip6t
, IP6T
);
65 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
74 Hence the start of any table is given by get_table() below. */
76 /* Check for an extension */
78 ip6t_ext_hdr(u8 nexthdr
)
80 return ( (nexthdr
== IPPROTO_HOPOPTS
) ||
81 (nexthdr
== IPPROTO_ROUTING
) ||
82 (nexthdr
== IPPROTO_FRAGMENT
) ||
83 (nexthdr
== IPPROTO_ESP
) ||
84 (nexthdr
== IPPROTO_AH
) ||
85 (nexthdr
== IPPROTO_NONE
) ||
86 (nexthdr
== IPPROTO_DSTOPTS
) );
89 /* Returns whether matches rule or not. */
90 /* Performance critical - called for every packet */
92 ip6_packet_match(const struct sk_buff
*skb
,
95 const struct ip6t_ip6
*ip6info
,
96 unsigned int *protoff
,
97 int *fragoff
, bool *hotdrop
)
100 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
102 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
104 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
105 &ip6info
->src
), IP6T_INV_SRCIP
) ||
106 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
107 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
108 dprintf("Source or dest mismatch.\n");
110 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
111 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
112 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
113 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
114 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
115 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
119 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
121 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev
, ip6info
->iniface
,
124 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
128 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
130 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
131 dprintf("VIA out mismatch (%s vs %s).%s\n",
132 outdev
, ip6info
->outiface
,
133 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
137 /* ... might want to do something with class and flowlabel here ... */
139 /* look for the desired protocol header */
140 if((ip6info
->flags
& IP6T_F_PROTO
)) {
142 unsigned short _frag_off
;
144 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
);
150 *fragoff
= _frag_off
;
152 dprintf("Packet protocol %hi ?= %s%hi.\n",
154 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
157 if (ip6info
->proto
== protohdr
) {
158 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
164 /* We need match for the '-p all', too! */
165 if ((ip6info
->proto
!= 0) &&
166 !(ip6info
->invflags
& IP6T_INV_PROTO
))
172 /* should be ip6 safe */
174 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
176 if (ipv6
->flags
& ~IP6T_F_MASK
) {
177 duprintf("Unknown flag bits set: %08X\n",
178 ipv6
->flags
& ~IP6T_F_MASK
);
181 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
182 duprintf("Unknown invflag bits set: %08X\n",
183 ipv6
->invflags
& ~IP6T_INV_MASK
);
190 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
193 pr_info("error: `%s'\n", (const char *)par
->targinfo
);
/* Return the rule entry located 'offset' bytes into the blob 'base'. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
204 /* All zeroes == unconditional rule. */
205 /* Mildly perf critical (only if packet tracing is on) */
206 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
208 static const struct ip6t_ip6 uncond
;
210 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-qualified accessor for a rule's target; the cast only drops
 * const so the non-const helper can be reused. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
219 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
220 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
221 /* This cries for unification! */
222 static const char *const hooknames
[] = {
223 [NF_INET_PRE_ROUTING
] = "PREROUTING",
224 [NF_INET_LOCAL_IN
] = "INPUT",
225 [NF_INET_FORWARD
] = "FORWARD",
226 [NF_INET_LOCAL_OUT
] = "OUTPUT",
227 [NF_INET_POST_ROUTING
] = "POSTROUTING",
230 enum nf_ip_trace_comments
{
231 NF_IP6_TRACE_COMMENT_RULE
,
232 NF_IP6_TRACE_COMMENT_RETURN
,
233 NF_IP6_TRACE_COMMENT_POLICY
,
236 static const char *const comments
[] = {
237 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
238 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
239 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
242 static struct nf_loginfo trace_loginfo
= {
243 .type
= NF_LOG_TYPE_LOG
,
247 .logflags
= NF_LOG_MASK
,
252 /* Mildly perf critical (only if packet tracing is on) */
254 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
255 const char *hookname
, const char **chainname
,
256 const char **comment
, unsigned int *rulenum
)
258 const struct ip6t_standard_target
*t
= (void *)ip6t_get_target_c(s
);
260 if (strcmp(t
->target
.u
.kernel
.target
->name
, IP6T_ERROR_TARGET
) == 0) {
261 /* Head of user chain: ERROR target with chainname */
262 *chainname
= t
->target
.data
;
267 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
268 strcmp(t
->target
.u
.kernel
.target
->name
,
269 IP6T_STANDARD_TARGET
) == 0 &&
271 unconditional(&s
->ipv6
)) {
272 /* Tail of chains: STANDARD target (return/policy) */
273 *comment
= *chainname
== hookname
274 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
275 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
284 static void trace_packet(const struct sk_buff
*skb
,
286 const struct net_device
*in
,
287 const struct net_device
*out
,
288 const char *tablename
,
289 const struct xt_table_info
*private,
290 const struct ip6t_entry
*e
)
292 const void *table_base
;
293 const struct ip6t_entry
*root
;
294 const char *hookname
, *chainname
, *comment
;
295 const struct ip6t_entry
*iter
;
296 unsigned int rulenum
= 0;
298 table_base
= private->entries
[smp_processor_id()];
299 root
= get_entry(table_base
, private->hook_entry
[hook
]);
301 hookname
= chainname
= hooknames
[hook
];
302 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
304 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
305 if (get_chainname_rulenum(iter
, e
, hookname
,
306 &chainname
, &comment
, &rulenum
) != 0)
309 nf_log_packet(AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
310 "TRACE: %s:%s:%s:%u ",
311 tablename
, chainname
, comment
, rulenum
);
315 static inline __pure
struct ip6t_entry
*
316 ip6t_next_entry(const struct ip6t_entry
*entry
)
318 return (void *)entry
+ entry
->next_offset
;
321 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
323 ip6t_do_table(struct sk_buff
*skb
,
325 const struct net_device
*in
,
326 const struct net_device
*out
,
327 struct xt_table
*table
)
329 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
330 /* Initializing verdict to NF_DROP keeps gcc happy. */
331 unsigned int verdict
= NF_DROP
;
332 const char *indev
, *outdev
;
333 const void *table_base
;
334 struct ip6t_entry
*e
, **jumpstack
;
335 unsigned int *stackptr
, origptr
, cpu
;
336 const struct xt_table_info
*private;
337 struct xt_action_param acpar
;
340 indev
= in
? in
->name
: nulldevname
;
341 outdev
= out
? out
->name
: nulldevname
;
342 /* We handle fragments by dealing with the first fragment as
343 * if it was a normal packet. All other fragments are treated
344 * normally, except that they will NEVER match rules that ask
345 * things we don't know, ie. tcp syn flag or ports). If the
346 * rule is also a fragment-specific rule, non-fragments won't
348 acpar
.hotdrop
= false;
351 acpar
.family
= NFPROTO_IPV6
;
352 acpar
.hooknum
= hook
;
354 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
357 private = table
->private;
358 cpu
= smp_processor_id();
359 table_base
= private->entries
[cpu
];
360 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
361 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
364 e
= get_entry(table_base
, private->hook_entry
[hook
]);
367 const struct ip6t_entry_target
*t
;
368 const struct xt_entry_match
*ematch
;
371 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
372 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
374 e
= ip6t_next_entry(e
);
378 xt_ematch_foreach(ematch
, e
) {
379 acpar
.match
= ematch
->u
.kernel
.match
;
380 acpar
.matchinfo
= ematch
->data
;
381 if (!acpar
.match
->match(skb
, &acpar
))
385 ADD_COUNTER(e
->counters
, skb
->len
, 1);
387 t
= ip6t_get_target_c(e
);
388 IP_NF_ASSERT(t
->u
.kernel
.target
);
390 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
391 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
392 /* The packet is traced: log it */
393 if (unlikely(skb
->nf_trace
))
394 trace_packet(skb
, hook
, in
, out
,
395 table
->name
, private, e
);
397 /* Standard target? */
398 if (!t
->u
.kernel
.target
->target
) {
401 v
= ((struct ip6t_standard_target
*)t
)->verdict
;
403 /* Pop from stack? */
404 if (v
!= IP6T_RETURN
) {
405 verdict
= (unsigned)(-v
) - 1;
409 e
= get_entry(table_base
,
410 private->underflow
[hook
]);
412 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
415 if (table_base
+ v
!= ip6t_next_entry(e
) &&
416 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
417 if (*stackptr
>= private->stacksize
) {
421 jumpstack
[(*stackptr
)++] = e
;
424 e
= get_entry(table_base
, v
);
428 acpar
.target
= t
->u
.kernel
.target
;
429 acpar
.targinfo
= t
->data
;
431 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
432 if (verdict
== IP6T_CONTINUE
)
433 e
= ip6t_next_entry(e
);
437 } while (!acpar
.hotdrop
);
439 xt_info_rdunlock_bh();
442 #ifdef DEBUG_ALLOW_ALL
451 /* Figures out from what hook each rule can be called: returns 0 if
452 there are loops. Puts hook bitmask in comefrom. */
454 mark_source_chains(const struct xt_table_info
*newinfo
,
455 unsigned int valid_hooks
, void *entry0
)
459 /* No recursion; use packet counter to save back ptrs (reset
460 to 0 as we leave), and comefrom to save source hook bitmask */
461 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
462 unsigned int pos
= newinfo
->hook_entry
[hook
];
463 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
465 if (!(valid_hooks
& (1 << hook
)))
468 /* Set initial back pointer. */
469 e
->counters
.pcnt
= pos
;
472 const struct ip6t_standard_target
*t
473 = (void *)ip6t_get_target_c(e
);
474 int visited
= e
->comefrom
& (1 << hook
);
476 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
477 pr_err("iptables: loop hook %u pos %u %08X.\n",
478 hook
, pos
, e
->comefrom
);
481 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
483 /* Unconditional return/END. */
484 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
485 (strcmp(t
->target
.u
.user
.name
,
486 IP6T_STANDARD_TARGET
) == 0) &&
488 unconditional(&e
->ipv6
)) || visited
) {
489 unsigned int oldpos
, size
;
491 if ((strcmp(t
->target
.u
.user
.name
,
492 IP6T_STANDARD_TARGET
) == 0) &&
493 t
->verdict
< -NF_MAX_VERDICT
- 1) {
494 duprintf("mark_source_chains: bad "
495 "negative verdict (%i)\n",
500 /* Return: backtrack through the last
503 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
504 #ifdef DEBUG_IP_FIREWALL_USER
506 & (1 << NF_INET_NUMHOOKS
)) {
507 duprintf("Back unset "
514 pos
= e
->counters
.pcnt
;
515 e
->counters
.pcnt
= 0;
517 /* We're at the start. */
521 e
= (struct ip6t_entry
*)
523 } while (oldpos
== pos
+ e
->next_offset
);
526 size
= e
->next_offset
;
527 e
= (struct ip6t_entry
*)
528 (entry0
+ pos
+ size
);
529 e
->counters
.pcnt
= pos
;
532 int newpos
= t
->verdict
;
534 if (strcmp(t
->target
.u
.user
.name
,
535 IP6T_STANDARD_TARGET
) == 0 &&
537 if (newpos
> newinfo
->size
-
538 sizeof(struct ip6t_entry
)) {
539 duprintf("mark_source_chains: "
540 "bad verdict (%i)\n",
544 /* This a jump; chase it. */
545 duprintf("Jump rule %u -> %u\n",
548 /* ... this is a fallthru */
549 newpos
= pos
+ e
->next_offset
;
551 e
= (struct ip6t_entry
*)
553 e
->counters
.pcnt
= pos
;
558 duprintf("Finished chain %u\n", hook
);
563 static void cleanup_match(struct ip6t_entry_match
*m
, struct net
*net
)
565 struct xt_mtdtor_param par
;
568 par
.match
= m
->u
.kernel
.match
;
569 par
.matchinfo
= m
->data
;
570 par
.family
= NFPROTO_IPV6
;
571 if (par
.match
->destroy
!= NULL
)
572 par
.match
->destroy(&par
);
573 module_put(par
.match
->me
);
577 check_entry(const struct ip6t_entry
*e
, const char *name
)
579 const struct ip6t_entry_target
*t
;
581 if (!ip6_checkentry(&e
->ipv6
)) {
582 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
586 if (e
->target_offset
+ sizeof(struct ip6t_entry_target
) >
590 t
= ip6t_get_target_c(e
);
591 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
597 static int check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
)
599 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
602 par
->match
= m
->u
.kernel
.match
;
603 par
->matchinfo
= m
->data
;
605 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
606 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
608 duprintf("ip_tables: check failed for `%s'.\n",
616 find_check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
)
618 struct xt_match
*match
;
621 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
624 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
625 return PTR_ERR(match
);
627 m
->u
.kernel
.match
= match
;
629 ret
= check_match(m
, par
);
635 module_put(m
->u
.kernel
.match
->me
);
639 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
641 struct ip6t_entry_target
*t
= ip6t_get_target(e
);
642 struct xt_tgchk_param par
= {
646 .target
= t
->u
.kernel
.target
,
648 .hook_mask
= e
->comefrom
,
649 .family
= NFPROTO_IPV6
,
653 t
= ip6t_get_target(e
);
654 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
655 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
657 duprintf("ip_tables: check failed for `%s'.\n",
658 t
->u
.kernel
.target
->name
);
665 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
668 struct ip6t_entry_target
*t
;
669 struct xt_target
*target
;
672 struct xt_mtchk_param mtpar
;
673 struct xt_entry_match
*ematch
;
675 ret
= check_entry(e
, name
);
682 mtpar
.entryinfo
= &e
->ipv6
;
683 mtpar
.hook_mask
= e
->comefrom
;
684 mtpar
.family
= NFPROTO_IPV6
;
685 xt_ematch_foreach(ematch
, e
) {
686 ret
= find_check_match(ematch
, &mtpar
);
688 goto cleanup_matches
;
692 t
= ip6t_get_target(e
);
693 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
695 if (IS_ERR(target
)) {
696 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
697 ret
= PTR_ERR(target
);
698 goto cleanup_matches
;
700 t
->u
.kernel
.target
= target
;
702 ret
= check_target(e
, net
, name
);
707 module_put(t
->u
.kernel
.target
->me
);
709 xt_ematch_foreach(ematch
, e
) {
712 cleanup_match(ematch
, net
);
717 static bool check_underflow(const struct ip6t_entry
*e
)
719 const struct ip6t_entry_target
*t
;
720 unsigned int verdict
;
722 if (!unconditional(&e
->ipv6
))
724 t
= ip6t_get_target_c(e
);
725 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
727 verdict
= ((struct ip6t_standard_target
*)t
)->verdict
;
728 verdict
= -verdict
- 1;
729 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
733 check_entry_size_and_hooks(struct ip6t_entry
*e
,
734 struct xt_table_info
*newinfo
,
735 const unsigned char *base
,
736 const unsigned char *limit
,
737 const unsigned int *hook_entries
,
738 const unsigned int *underflows
,
739 unsigned int valid_hooks
)
743 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
744 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
745 duprintf("Bad offset %p\n", e
);
750 < sizeof(struct ip6t_entry
) + sizeof(struct ip6t_entry_target
)) {
751 duprintf("checking: element %p size %u\n",
756 /* Check hooks & underflows */
757 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
758 if (!(valid_hooks
& (1 << h
)))
760 if ((unsigned char *)e
- base
== hook_entries
[h
])
761 newinfo
->hook_entry
[h
] = hook_entries
[h
];
762 if ((unsigned char *)e
- base
== underflows
[h
]) {
763 if (!check_underflow(e
)) {
764 pr_err("Underflows must be unconditional and "
765 "use the STANDARD target with "
769 newinfo
->underflow
[h
] = underflows
[h
];
773 /* Clear counters and comefrom */
774 e
->counters
= ((struct xt_counters
) { 0, 0 });
779 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
781 struct xt_tgdtor_param par
;
782 struct ip6t_entry_target
*t
;
783 struct xt_entry_match
*ematch
;
785 /* Cleanup all matches */
786 xt_ematch_foreach(ematch
, e
)
787 cleanup_match(ematch
, net
);
788 t
= ip6t_get_target(e
);
791 par
.target
= t
->u
.kernel
.target
;
792 par
.targinfo
= t
->data
;
793 par
.family
= NFPROTO_IPV6
;
794 if (par
.target
->destroy
!= NULL
)
795 par
.target
->destroy(&par
);
796 module_put(par
.target
->me
);
799 /* Checks and translates the user-supplied table segment (held in
802 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
803 const struct ip6t_replace
*repl
)
805 struct ip6t_entry
*iter
;
809 newinfo
->size
= repl
->size
;
810 newinfo
->number
= repl
->num_entries
;
812 /* Init all hooks to impossible value. */
813 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
814 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
815 newinfo
->underflow
[i
] = 0xFFFFFFFF;
818 duprintf("translate_table: size %u\n", newinfo
->size
);
820 /* Walk through entries, checking offsets. */
821 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
822 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
830 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
831 XT_ERROR_TARGET
) == 0)
832 ++newinfo
->stacksize
;
835 if (i
!= repl
->num_entries
) {
836 duprintf("translate_table: %u not %u entries\n",
837 i
, repl
->num_entries
);
841 /* Check hooks all assigned */
842 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
843 /* Only hooks which are valid */
844 if (!(repl
->valid_hooks
& (1 << i
)))
846 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
847 duprintf("Invalid hook entry %u %u\n",
848 i
, repl
->hook_entry
[i
]);
851 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
852 duprintf("Invalid underflow %u %u\n",
853 i
, repl
->underflow
[i
]);
858 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
861 /* Finally, each sanity check must pass */
863 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
864 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
871 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
874 cleanup_entry(iter
, net
);
879 /* And one copy for every other CPU */
880 for_each_possible_cpu(i
) {
881 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
882 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
889 get_counters(const struct xt_table_info
*t
,
890 struct xt_counters counters
[])
892 struct ip6t_entry
*iter
;
895 unsigned int curcpu
= get_cpu();
897 /* Instead of clearing (by a previous call to memset())
898 * the counters and using adds, we set the counters
899 * with data used by 'current' CPU
901 * Bottom half has to be disabled to prevent deadlock
902 * if new softirq were to run and call ipt_do_table
906 xt_entry_foreach(iter
, t
->entries
[curcpu
], t
->size
) {
907 SET_COUNTER(counters
[i
], iter
->counters
.bcnt
,
908 iter
->counters
.pcnt
);
912 /* Processing counters from other cpus, we can let bottom half enabled,
913 * (preemption is disabled)
916 for_each_possible_cpu(cpu
) {
922 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
923 ADD_COUNTER(counters
[i
], iter
->counters
.bcnt
,
924 iter
->counters
.pcnt
);
927 xt_info_wrunlock(cpu
);
933 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
935 unsigned int countersize
;
936 struct xt_counters
*counters
;
937 const struct xt_table_info
*private = table
->private;
939 /* We need atomic snapshot of counters: rest doesn't change
940 (other than comefrom, which userspace doesn't care
942 countersize
= sizeof(struct xt_counters
) * private->number
;
943 counters
= vmalloc(countersize
);
945 if (counters
== NULL
)
946 return ERR_PTR(-ENOMEM
);
948 get_counters(private, counters
);
954 copy_entries_to_user(unsigned int total_size
,
955 const struct xt_table
*table
,
956 void __user
*userptr
)
958 unsigned int off
, num
;
959 const struct ip6t_entry
*e
;
960 struct xt_counters
*counters
;
961 const struct xt_table_info
*private = table
->private;
963 const void *loc_cpu_entry
;
965 counters
= alloc_counters(table
);
966 if (IS_ERR(counters
))
967 return PTR_ERR(counters
);
969 /* choose the copy that is on our node/cpu, ...
970 * This choice is lazy (because current thread is
971 * allowed to migrate to another cpu)
973 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
974 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
979 /* ... then go back and fix counters and names */
980 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
982 const struct ip6t_entry_match
*m
;
983 const struct ip6t_entry_target
*t
;
985 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
986 if (copy_to_user(userptr
+ off
987 + offsetof(struct ip6t_entry
, counters
),
989 sizeof(counters
[num
])) != 0) {
994 for (i
= sizeof(struct ip6t_entry
);
995 i
< e
->target_offset
;
996 i
+= m
->u
.match_size
) {
999 if (copy_to_user(userptr
+ off
+ i
1000 + offsetof(struct ip6t_entry_match
,
1002 m
->u
.kernel
.match
->name
,
1003 strlen(m
->u
.kernel
.match
->name
)+1)
1010 t
= ip6t_get_target_c(e
);
1011 if (copy_to_user(userptr
+ off
+ e
->target_offset
1012 + offsetof(struct ip6t_entry_target
,
1014 t
->u
.kernel
.target
->name
,
1015 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1026 #ifdef CONFIG_COMPAT
1027 static void compat_standard_from_user(void *dst
, const void *src
)
1029 int v
= *(compat_int_t
*)src
;
1032 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1033 memcpy(dst
, &v
, sizeof(v
));
1036 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1038 compat_int_t cv
= *(int *)src
;
1041 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1042 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1045 static int compat_calc_entry(const struct ip6t_entry
*e
,
1046 const struct xt_table_info
*info
,
1047 const void *base
, struct xt_table_info
*newinfo
)
1049 const struct xt_entry_match
*ematch
;
1050 const struct ip6t_entry_target
*t
;
1051 unsigned int entry_offset
;
1054 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1055 entry_offset
= (void *)e
- base
;
1056 xt_ematch_foreach(ematch
, e
)
1057 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1058 t
= ip6t_get_target_c(e
);
1059 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1060 newinfo
->size
-= off
;
1061 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1065 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1066 if (info
->hook_entry
[i
] &&
1067 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1068 newinfo
->hook_entry
[i
] -= off
;
1069 if (info
->underflow
[i
] &&
1070 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1071 newinfo
->underflow
[i
] -= off
;
1076 static int compat_table_info(const struct xt_table_info
*info
,
1077 struct xt_table_info
*newinfo
)
1079 struct ip6t_entry
*iter
;
1080 void *loc_cpu_entry
;
1083 if (!newinfo
|| !info
)
1086 /* we dont care about newinfo->entries[] */
1087 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1088 newinfo
->initial_entries
= 0;
1089 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1090 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1091 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1099 static int get_info(struct net
*net
, void __user
*user
,
1100 const int *len
, int compat
)
1102 char name
[IP6T_TABLE_MAXNAMELEN
];
1106 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1107 duprintf("length %u != %zu\n", *len
,
1108 sizeof(struct ip6t_getinfo
));
1112 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1115 name
[IP6T_TABLE_MAXNAMELEN
-1] = '\0';
1116 #ifdef CONFIG_COMPAT
1118 xt_compat_lock(AF_INET6
);
1120 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1121 "ip6table_%s", name
);
1122 if (t
&& !IS_ERR(t
)) {
1123 struct ip6t_getinfo info
;
1124 const struct xt_table_info
*private = t
->private;
1125 #ifdef CONFIG_COMPAT
1126 struct xt_table_info tmp
;
1129 ret
= compat_table_info(private, &tmp
);
1130 xt_compat_flush_offsets(AF_INET6
);
1134 info
.valid_hooks
= t
->valid_hooks
;
1135 memcpy(info
.hook_entry
, private->hook_entry
,
1136 sizeof(info
.hook_entry
));
1137 memcpy(info
.underflow
, private->underflow
,
1138 sizeof(info
.underflow
));
1139 info
.num_entries
= private->number
;
1140 info
.size
= private->size
;
1141 strcpy(info
.name
, name
);
1143 if (copy_to_user(user
, &info
, *len
) != 0)
1151 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1152 #ifdef CONFIG_COMPAT
1154 xt_compat_unlock(AF_INET6
);
1160 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1164 struct ip6t_get_entries get
;
1167 if (*len
< sizeof(get
)) {
1168 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1171 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1173 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1174 duprintf("get_entries: %u != %zu\n",
1175 *len
, sizeof(get
) + get
.size
);
1179 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1180 if (t
&& !IS_ERR(t
)) {
1181 struct xt_table_info
*private = t
->private;
1182 duprintf("t->private->number = %u\n", private->number
);
1183 if (get
.size
== private->size
)
1184 ret
= copy_entries_to_user(private->size
,
1185 t
, uptr
->entrytable
);
1187 duprintf("get_entries: I've got %u not %u!\n",
1188 private->size
, get
.size
);
1194 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1200 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1201 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1202 void __user
*counters_ptr
)
1206 struct xt_table_info
*oldinfo
;
1207 struct xt_counters
*counters
;
1208 const void *loc_cpu_old_entry
;
1209 struct ip6t_entry
*iter
;
1212 counters
= vmalloc(num_counters
* sizeof(struct xt_counters
));
1218 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1219 "ip6table_%s", name
);
1220 if (!t
|| IS_ERR(t
)) {
1221 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1222 goto free_newinfo_counters_untrans
;
1226 if (valid_hooks
!= t
->valid_hooks
) {
1227 duprintf("Valid hook crap: %08X vs %08X\n",
1228 valid_hooks
, t
->valid_hooks
);
1233 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1237 /* Update module usage count based on number of rules */
1238 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1239 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1240 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1241 (newinfo
->number
<= oldinfo
->initial_entries
))
1243 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1244 (newinfo
->number
<= oldinfo
->initial_entries
))
1247 /* Get the old counters, and synchronize with replace */
1248 get_counters(oldinfo
, counters
);
1250 /* Decrease module usage counts and free resource */
1251 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1252 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1253 cleanup_entry(iter
, net
);
1255 xt_free_table_info(oldinfo
);
1256 if (copy_to_user(counters_ptr
, counters
,
1257 sizeof(struct xt_counters
) * num_counters
) != 0)
1266 free_newinfo_counters_untrans
:
1273 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1276 struct ip6t_replace tmp
;
1277 struct xt_table_info
*newinfo
;
1278 void *loc_cpu_entry
;
1279 struct ip6t_entry
*iter
;
1281 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1284 /* overflow check */
1285 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1288 newinfo
= xt_alloc_table_info(tmp
.size
);
1292 /* choose the copy that is on our node/cpu */
1293 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1294 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1300 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1304 duprintf("ip_tables: Translated table\n");
1306 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1307 tmp
.num_counters
, tmp
.counters
);
1309 goto free_newinfo_untrans
;
1312 free_newinfo_untrans
:
1313 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1314 cleanup_entry(iter
, net
);
1316 xt_free_table_info(newinfo
);
1321 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1324 unsigned int i
, curcpu
;
1325 struct xt_counters_info tmp
;
1326 struct xt_counters
*paddc
;
1327 unsigned int num_counters
;
1332 const struct xt_table_info
*private;
1334 const void *loc_cpu_entry
;
1335 struct ip6t_entry
*iter
;
1336 #ifdef CONFIG_COMPAT
1337 struct compat_xt_counters_info compat_tmp
;
1341 size
= sizeof(struct compat_xt_counters_info
);
1346 size
= sizeof(struct xt_counters_info
);
1349 if (copy_from_user(ptmp
, user
, size
) != 0)
1352 #ifdef CONFIG_COMPAT
1354 num_counters
= compat_tmp
.num_counters
;
1355 name
= compat_tmp
.name
;
1359 num_counters
= tmp
.num_counters
;
1363 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1366 paddc
= vmalloc(len
- size
);
1370 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1375 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1376 if (!t
|| IS_ERR(t
)) {
1377 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1383 private = t
->private;
1384 if (private->number
!= num_counters
) {
1386 goto unlock_up_free
;
1390 /* Choose the copy that is on our node */
1391 curcpu
= smp_processor_id();
1392 xt_info_wrlock(curcpu
);
1393 loc_cpu_entry
= private->entries
[curcpu
];
1394 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1395 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1398 xt_info_wrunlock(curcpu
);
1410 #ifdef CONFIG_COMPAT
1411 struct compat_ip6t_replace
{
1412 char name
[IP6T_TABLE_MAXNAMELEN
];
1416 u32 hook_entry
[NF_INET_NUMHOOKS
];
1417 u32 underflow
[NF_INET_NUMHOOKS
];
1419 compat_uptr_t counters
; /* struct ip6t_counters * */
1420 struct compat_ip6t_entry entries
[0];
1424 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1425 unsigned int *size
, struct xt_counters
*counters
,
1428 struct ip6t_entry_target
*t
;
1429 struct compat_ip6t_entry __user
*ce
;
1430 u_int16_t target_offset
, next_offset
;
1431 compat_uint_t origsize
;
1432 const struct xt_entry_match
*ematch
;
1436 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1437 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1438 copy_to_user(&ce
->counters
, &counters
[i
],
1439 sizeof(counters
[i
])) != 0)
1442 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1443 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1445 xt_ematch_foreach(ematch
, e
) {
1446 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1450 target_offset
= e
->target_offset
- (origsize
- *size
);
1451 t
= ip6t_get_target(e
);
1452 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1455 next_offset
= e
->next_offset
- (origsize
- *size
);
1456 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1457 put_user(next_offset
, &ce
->next_offset
) != 0)
1463 compat_find_calc_match(struct ip6t_entry_match
*m
,
1465 const struct ip6t_ip6
*ipv6
,
1466 unsigned int hookmask
,
1469 struct xt_match
*match
;
1471 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1472 m
->u
.user
.revision
);
1473 if (IS_ERR(match
)) {
1474 duprintf("compat_check_calc_match: `%s' not found\n",
1476 return PTR_ERR(match
);
1478 m
->u
.kernel
.match
= match
;
1479 *size
+= xt_compat_match_offset(match
);
1483 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1485 struct ip6t_entry_target
*t
;
1486 struct xt_entry_match
*ematch
;
1488 /* Cleanup all matches */
1489 xt_ematch_foreach(ematch
, e
)
1490 module_put(ematch
->u
.kernel
.match
->me
);
1491 t
= compat_ip6t_get_target(e
);
1492 module_put(t
->u
.kernel
.target
->me
);
1496 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1497 struct xt_table_info
*newinfo
,
1499 const unsigned char *base
,
1500 const unsigned char *limit
,
1501 const unsigned int *hook_entries
,
1502 const unsigned int *underflows
,
1505 struct xt_entry_match
*ematch
;
1506 struct ip6t_entry_target
*t
;
1507 struct xt_target
*target
;
1508 unsigned int entry_offset
;
1512 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1513 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1514 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1515 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1519 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1520 sizeof(struct compat_xt_entry_target
)) {
1521 duprintf("checking: element %p size %u\n",
1526 /* For purposes of check_entry casting the compat entry is fine */
1527 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1531 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1532 entry_offset
= (void *)e
- (void *)base
;
1534 xt_ematch_foreach(ematch
, e
) {
1535 ret
= compat_find_calc_match(ematch
, name
,
1536 &e
->ipv6
, e
->comefrom
, &off
);
1538 goto release_matches
;
1542 t
= compat_ip6t_get_target(e
);
1543 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1544 t
->u
.user
.revision
);
1545 if (IS_ERR(target
)) {
1546 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1548 ret
= PTR_ERR(target
);
1549 goto release_matches
;
1551 t
->u
.kernel
.target
= target
;
1553 off
+= xt_compat_target_offset(target
);
1555 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1559 /* Check hooks & underflows */
1560 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1561 if ((unsigned char *)e
- base
== hook_entries
[h
])
1562 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1563 if ((unsigned char *)e
- base
== underflows
[h
])
1564 newinfo
->underflow
[h
] = underflows
[h
];
1567 /* Clear counters and comefrom */
1568 memset(&e
->counters
, 0, sizeof(e
->counters
));
1573 module_put(t
->u
.kernel
.target
->me
);
1575 xt_ematch_foreach(ematch
, e
) {
1578 module_put(ematch
->u
.kernel
.match
->me
);
1584 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1585 unsigned int *size
, const char *name
,
1586 struct xt_table_info
*newinfo
, unsigned char *base
)
1588 struct ip6t_entry_target
*t
;
1589 struct xt_target
*target
;
1590 struct ip6t_entry
*de
;
1591 unsigned int origsize
;
1593 struct xt_entry_match
*ematch
;
1597 de
= (struct ip6t_entry
*)*dstptr
;
1598 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1599 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1601 *dstptr
+= sizeof(struct ip6t_entry
);
1602 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1604 xt_ematch_foreach(ematch
, e
) {
1605 ret
= xt_compat_match_from_user(ematch
, dstptr
, size
);
1609 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1610 t
= compat_ip6t_get_target(e
);
1611 target
= t
->u
.kernel
.target
;
1612 xt_compat_target_from_user(t
, dstptr
, size
);
1614 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1615 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1616 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1617 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1618 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1619 newinfo
->underflow
[h
] -= origsize
- *size
;
1624 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1629 struct xt_mtchk_param mtpar
;
1630 struct xt_entry_match
*ematch
;
1635 mtpar
.entryinfo
= &e
->ipv6
;
1636 mtpar
.hook_mask
= e
->comefrom
;
1637 mtpar
.family
= NFPROTO_IPV6
;
1638 xt_ematch_foreach(ematch
, e
) {
1639 ret
= check_match(ematch
, &mtpar
);
1641 goto cleanup_matches
;
1645 ret
= check_target(e
, net
, name
);
1647 goto cleanup_matches
;
1651 xt_ematch_foreach(ematch
, e
) {
1654 cleanup_match(ematch
, net
);
1660 translate_compat_table(struct net
*net
,
1662 unsigned int valid_hooks
,
1663 struct xt_table_info
**pinfo
,
1665 unsigned int total_size
,
1666 unsigned int number
,
1667 unsigned int *hook_entries
,
1668 unsigned int *underflows
)
1671 struct xt_table_info
*newinfo
, *info
;
1672 void *pos
, *entry0
, *entry1
;
1673 struct compat_ip6t_entry
*iter0
;
1674 struct ip6t_entry
*iter1
;
1681 info
->number
= number
;
1683 /* Init all hooks to impossible value. */
1684 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1685 info
->hook_entry
[i
] = 0xFFFFFFFF;
1686 info
->underflow
[i
] = 0xFFFFFFFF;
1689 duprintf("translate_compat_table: size %u\n", info
->size
);
1691 xt_compat_lock(AF_INET6
);
1692 /* Walk through entries, checking offsets. */
1693 xt_entry_foreach(iter0
, entry0
, total_size
) {
1694 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1696 entry0
+ total_size
,
1707 duprintf("translate_compat_table: %u not %u entries\n",
1712 /* Check hooks all assigned */
1713 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1714 /* Only hooks which are valid */
1715 if (!(valid_hooks
& (1 << i
)))
1717 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1718 duprintf("Invalid hook entry %u %u\n",
1719 i
, hook_entries
[i
]);
1722 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1723 duprintf("Invalid underflow %u %u\n",
1730 newinfo
= xt_alloc_table_info(size
);
1734 newinfo
->number
= number
;
1735 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1736 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1737 newinfo
->underflow
[i
] = info
->underflow
[i
];
1739 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1742 xt_entry_foreach(iter0
, entry0
, total_size
) {
1743 ret
= compat_copy_entry_from_user(iter0
, &pos
, &size
,
1744 name
, newinfo
, entry1
);
1748 xt_compat_flush_offsets(AF_INET6
);
1749 xt_compat_unlock(AF_INET6
);
1754 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1758 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1759 ret
= compat_check_entry(iter1
, net
, name
);
1763 if (strcmp(ip6t_get_target(iter1
)->u
.user
.name
,
1764 XT_ERROR_TARGET
) == 0)
1765 ++newinfo
->stacksize
;
1769 * The first i matches need cleanup_entry (calls ->destroy)
1770 * because they had called ->check already. The other j-i
1771 * entries need only release.
1775 xt_entry_foreach(iter0
, entry0
, newinfo
->size
) {
1780 compat_release_entry(iter0
);
1782 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1785 cleanup_entry(iter1
, net
);
1787 xt_free_table_info(newinfo
);
1791 /* And one copy for every other CPU */
1792 for_each_possible_cpu(i
)
1793 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1794 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1798 xt_free_table_info(info
);
1802 xt_free_table_info(newinfo
);
1804 xt_entry_foreach(iter0
, entry0
, total_size
) {
1807 compat_release_entry(iter0
);
1811 xt_compat_flush_offsets(AF_INET6
);
1812 xt_compat_unlock(AF_INET6
);
1817 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1820 struct compat_ip6t_replace tmp
;
1821 struct xt_table_info
*newinfo
;
1822 void *loc_cpu_entry
;
1823 struct ip6t_entry
*iter
;
1825 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1828 /* overflow check */
1829 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1831 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1834 newinfo
= xt_alloc_table_info(tmp
.size
);
1838 /* choose the copy that is on our node/cpu */
1839 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1840 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1846 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1847 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1848 tmp
.num_entries
, tmp
.hook_entry
,
1853 duprintf("compat_do_replace: Translated table\n");
1855 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1856 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1858 goto free_newinfo_untrans
;
1861 free_newinfo_untrans
:
1862 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1863 cleanup_entry(iter
, net
);
1865 xt_free_table_info(newinfo
);
1870 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1875 if (!capable(CAP_NET_ADMIN
))
1879 case IP6T_SO_SET_REPLACE
:
1880 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1883 case IP6T_SO_SET_ADD_COUNTERS
:
1884 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1888 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1895 struct compat_ip6t_get_entries
{
1896 char name
[IP6T_TABLE_MAXNAMELEN
];
1898 struct compat_ip6t_entry entrytable
[0];
1902 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1903 void __user
*userptr
)
1905 struct xt_counters
*counters
;
1906 const struct xt_table_info
*private = table
->private;
1910 const void *loc_cpu_entry
;
1912 struct ip6t_entry
*iter
;
1914 counters
= alloc_counters(table
);
1915 if (IS_ERR(counters
))
1916 return PTR_ERR(counters
);
1918 /* choose the copy that is on our node/cpu, ...
1919 * This choice is lazy (because current thread is
1920 * allowed to migrate to another cpu)
1922 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1925 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1926 ret
= compat_copy_entry_to_user(iter
, &pos
,
1927 &size
, counters
, i
++);
1937 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1941 struct compat_ip6t_get_entries get
;
1944 if (*len
< sizeof(get
)) {
1945 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1949 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1952 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1953 duprintf("compat_get_entries: %u != %zu\n",
1954 *len
, sizeof(get
) + get
.size
);
1958 xt_compat_lock(AF_INET6
);
1959 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1960 if (t
&& !IS_ERR(t
)) {
1961 const struct xt_table_info
*private = t
->private;
1962 struct xt_table_info info
;
1963 duprintf("t->private->number = %u\n", private->number
);
1964 ret
= compat_table_info(private, &info
);
1965 if (!ret
&& get
.size
== info
.size
) {
1966 ret
= compat_copy_entries_to_user(private->size
,
1967 t
, uptr
->entrytable
);
1969 duprintf("compat_get_entries: I've got %u not %u!\n",
1970 private->size
, get
.size
);
1973 xt_compat_flush_offsets(AF_INET6
);
1977 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1979 xt_compat_unlock(AF_INET6
);
1983 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1986 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1990 if (!capable(CAP_NET_ADMIN
))
1994 case IP6T_SO_GET_INFO
:
1995 ret
= get_info(sock_net(sk
), user
, len
, 1);
1997 case IP6T_SO_GET_ENTRIES
:
1998 ret
= compat_get_entries(sock_net(sk
), user
, len
);
2001 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2008 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2012 if (!capable(CAP_NET_ADMIN
))
2016 case IP6T_SO_SET_REPLACE
:
2017 ret
= do_replace(sock_net(sk
), user
, len
);
2020 case IP6T_SO_SET_ADD_COUNTERS
:
2021 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2025 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2033 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2037 if (!capable(CAP_NET_ADMIN
))
2041 case IP6T_SO_GET_INFO
:
2042 ret
= get_info(sock_net(sk
), user
, len
, 0);
2045 case IP6T_SO_GET_ENTRIES
:
2046 ret
= get_entries(sock_net(sk
), user
, len
);
2049 case IP6T_SO_GET_REVISION_MATCH
:
2050 case IP6T_SO_GET_REVISION_TARGET
: {
2051 struct ip6t_get_revision rev
;
2054 if (*len
!= sizeof(rev
)) {
2058 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2063 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2068 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2071 "ip6t_%s", rev
.name
);
2076 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2083 struct xt_table
*ip6t_register_table(struct net
*net
,
2084 const struct xt_table
*table
,
2085 const struct ip6t_replace
*repl
)
2088 struct xt_table_info
*newinfo
;
2089 struct xt_table_info bootstrap
= {0};
2090 void *loc_cpu_entry
;
2091 struct xt_table
*new_table
;
2093 newinfo
= xt_alloc_table_info(repl
->size
);
2099 /* choose the copy on our node/cpu, but dont care about preemption */
2100 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2101 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2103 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
2107 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2108 if (IS_ERR(new_table
)) {
2109 ret
= PTR_ERR(new_table
);
2115 xt_free_table_info(newinfo
);
2117 return ERR_PTR(ret
);
2120 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2122 struct xt_table_info
*private;
2123 void *loc_cpu_entry
;
2124 struct module
*table_owner
= table
->me
;
2125 struct ip6t_entry
*iter
;
2127 private = xt_unregister_table(table
);
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2131 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2132 cleanup_entry(iter
, net
);
2133 if (private->number
> private->initial_entries
)
2134 module_put(table_owner
);
2135 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	/* XOR with invert flips the result for '!' rules. */
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
2149 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
2151 const struct icmp6hdr
*ic
;
2152 struct icmp6hdr _icmph
;
2153 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2155 /* Must not be a fragment. */
2156 if (par
->fragoff
!= 0)
2159 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2161 /* We've been asked to examine this packet, and we
2162 * can't. Hence, no choice but to drop.
2164 duprintf("Dropping evil ICMP tinygram.\n");
2165 par
->hotdrop
= true;
2169 return icmp6_type_code_match(icmpinfo
->type
,
2172 ic
->icmp6_type
, ic
->icmp6_code
,
2173 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2176 /* Called when user tries to insert an entry of this type. */
2177 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2179 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2181 /* Must specify no unknown invflags */
2182 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
2185 /* The built-in targets: standard (NULL) and error. */
2186 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
2188 .name
= IP6T_STANDARD_TARGET
,
2189 .targetsize
= sizeof(int),
2190 .family
= NFPROTO_IPV6
,
2191 #ifdef CONFIG_COMPAT
2192 .compatsize
= sizeof(compat_int_t
),
2193 .compat_from_user
= compat_standard_from_user
,
2194 .compat_to_user
= compat_standard_to_user
,
2198 .name
= IP6T_ERROR_TARGET
,
2199 .target
= ip6t_error
,
2200 .targetsize
= IP6T_FUNCTION_MAXNAMELEN
,
2201 .family
= NFPROTO_IPV6
,
2205 static struct nf_sockopt_ops ip6t_sockopts
= {
2207 .set_optmin
= IP6T_BASE_CTL
,
2208 .set_optmax
= IP6T_SO_SET_MAX
+1,
2209 .set
= do_ip6t_set_ctl
,
2210 #ifdef CONFIG_COMPAT
2211 .compat_set
= compat_do_ip6t_set_ctl
,
2213 .get_optmin
= IP6T_BASE_CTL
,
2214 .get_optmax
= IP6T_SO_GET_MAX
+1,
2215 .get
= do_ip6t_get_ctl
,
2216 #ifdef CONFIG_COMPAT
2217 .compat_get
= compat_do_ip6t_get_ctl
,
2219 .owner
= THIS_MODULE
,
2222 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
2225 .match
= icmp6_match
,
2226 .matchsize
= sizeof(struct ip6t_icmp
),
2227 .checkentry
= icmp6_checkentry
,
2228 .proto
= IPPROTO_ICMPV6
,
2229 .family
= NFPROTO_IPV6
,
2233 static int __net_init
ip6_tables_net_init(struct net
*net
)
2235 return xt_proto_init(net
, NFPROTO_IPV6
);
2238 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2240 xt_proto_fini(net
, NFPROTO_IPV6
);
2243 static struct pernet_operations ip6_tables_net_ops
= {
2244 .init
= ip6_tables_net_init
,
2245 .exit
= ip6_tables_net_exit
,
2248 static int __init
ip6_tables_init(void)
2252 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2256 /* Noone else will be downing sem now, so we won't sleep */
2257 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2260 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2264 /* Register setsockopt */
2265 ret
= nf_register_sockopt(&ip6t_sockopts
);
2269 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2273 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2275 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2277 unregister_pernet_subsys(&ip6_tables_net_ops
);
2282 static void __exit
ip6_tables_fini(void)
2284 nf_unregister_sockopt(&ip6t_sockopts
);
2286 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2287 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2288 unregister_pernet_subsys(&ip6_tables_net_ops
);
2292 * find the offset to specified header or the protocol number of last header
2293 * if target < 0. "last header" is transport protocol header, ESP, or
2296 * If target header is found, its offset is set in *offset and return protocol
2297 * number. Otherwise, return -1.
2299 * If the first fragment doesn't contain the final protocol header or
2300 * NEXTHDR_NONE it is considered invalid.
2302 * Note that non-1st fragment is special case that "the protocol number
2303 * of last header" is "next header" field in Fragment header. In this case,
2304 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2308 int ipv6_find_hdr(const struct sk_buff
*skb
, unsigned int *offset
,
2309 int target
, unsigned short *fragoff
)
2311 unsigned int start
= skb_network_offset(skb
) + sizeof(struct ipv6hdr
);
2312 u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
2313 unsigned int len
= skb
->len
- start
;
2318 while (nexthdr
!= target
) {
2319 struct ipv6_opt_hdr _hdr
, *hp
;
2320 unsigned int hdrlen
;
2322 if ((!ipv6_ext_hdr(nexthdr
)) || nexthdr
== NEXTHDR_NONE
) {
2328 hp
= skb_header_pointer(skb
, start
, sizeof(_hdr
), &_hdr
);
2331 if (nexthdr
== NEXTHDR_FRAGMENT
) {
2332 unsigned short _frag_off
;
2334 fp
= skb_header_pointer(skb
,
2335 start
+offsetof(struct frag_hdr
,
2342 _frag_off
= ntohs(*fp
) & ~0x7;
2345 ((!ipv6_ext_hdr(hp
->nexthdr
)) ||
2346 hp
->nexthdr
== NEXTHDR_NONE
)) {
2348 *fragoff
= _frag_off
;
2354 } else if (nexthdr
== NEXTHDR_AUTH
)
2355 hdrlen
= (hp
->hdrlen
+ 2) << 2;
2357 hdrlen
= ipv6_optlen(hp
);
2359 nexthdr
= hp
->nexthdr
;
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);