/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug printout helpers: compile to nothing unless the matching
 * DEBUG_* knob above is enabled. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
73 return xt_alloc_initial_table(ip6t
, IP6T
);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
84 Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr
)
90 return ( (nexthdr
== IPPROTO_HOPOPTS
) ||
91 (nexthdr
== IPPROTO_ROUTING
) ||
92 (nexthdr
== IPPROTO_FRAGMENT
) ||
93 (nexthdr
== IPPROTO_ESP
) ||
94 (nexthdr
== IPPROTO_AH
) ||
95 (nexthdr
== IPPROTO_NONE
) ||
96 (nexthdr
== IPPROTO_DSTOPTS
) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff
*skb
,
105 const struct ip6t_ip6
*ip6info
,
106 unsigned int *protoff
,
107 int *fragoff
, bool *hotdrop
)
110 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
115 &ip6info
->src
), IP6T_INV_SRCIP
) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
117 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
131 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev
, ip6info
->iniface
,
134 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
138 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
140 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev
, ip6info
->outiface
,
143 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info
->flags
& IP6T_F_PROTO
)) {
152 unsigned short _frag_off
;
154 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
);
160 *fragoff
= _frag_off
;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
167 if (ip6info
->proto
== protohdr
) {
168 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info
->proto
!= 0) &&
176 !(ip6info
->invflags
& IP6T_INV_PROTO
))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
186 if (ipv6
->flags
& ~IP6T_F_MASK
) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6
->flags
& ~IP6T_F_MASK
);
191 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6
->invflags
& ~IP6T_INV_MASK
);
200 ip6t_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
203 pr_info("error: `%s'\n", (const char *)par
->targinfo
);
208 /* Performance critical - called for every packet */
210 do_match(const struct ip6t_entry_match
*m
, const struct sk_buff
*skb
,
211 struct xt_match_param
*par
)
213 par
->match
= m
->u
.kernel
.match
;
214 par
->matchinfo
= m
->data
;
216 /* Stop iteration if it doesn't match */
217 if (!m
->u
.kernel
.match
->match(skb
, par
))
/* Translate a byte offset inside a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
229 /* All zeroes == unconditional rule. */
230 /* Mildly perf critical (only if packet tracing is on) */
231 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
233 static const struct ip6t_ip6 uncond
;
235 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-aware wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
244 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
245 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
246 /* This cries for unification! */
247 static const char *const hooknames
[] = {
248 [NF_INET_PRE_ROUTING
] = "PREROUTING",
249 [NF_INET_LOCAL_IN
] = "INPUT",
250 [NF_INET_FORWARD
] = "FORWARD",
251 [NF_INET_LOCAL_OUT
] = "OUTPUT",
252 [NF_INET_POST_ROUTING
] = "POSTROUTING",
255 enum nf_ip_trace_comments
{
256 NF_IP6_TRACE_COMMENT_RULE
,
257 NF_IP6_TRACE_COMMENT_RETURN
,
258 NF_IP6_TRACE_COMMENT_POLICY
,
261 static const char *const comments
[] = {
262 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
263 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
264 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
267 static struct nf_loginfo trace_loginfo
= {
268 .type
= NF_LOG_TYPE_LOG
,
272 .logflags
= NF_LOG_MASK
,
277 /* Mildly perf critical (only if packet tracing is on) */
279 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
280 const char *hookname
, const char **chainname
,
281 const char **comment
, unsigned int *rulenum
)
283 const struct ip6t_standard_target
*t
= (void *)ip6t_get_target_c(s
);
285 if (strcmp(t
->target
.u
.kernel
.target
->name
, IP6T_ERROR_TARGET
) == 0) {
286 /* Head of user chain: ERROR target with chainname */
287 *chainname
= t
->target
.data
;
292 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
293 strcmp(t
->target
.u
.kernel
.target
->name
,
294 IP6T_STANDARD_TARGET
) == 0 &&
296 unconditional(&s
->ipv6
)) {
297 /* Tail of chains: STANDARD target (return/policy) */
298 *comment
= *chainname
== hookname
299 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
300 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
309 static void trace_packet(const struct sk_buff
*skb
,
311 const struct net_device
*in
,
312 const struct net_device
*out
,
313 const char *tablename
,
314 const struct xt_table_info
*private,
315 const struct ip6t_entry
*e
)
317 const void *table_base
;
318 const struct ip6t_entry
*root
;
319 const char *hookname
, *chainname
, *comment
;
320 const struct ip6t_entry
*iter
;
321 unsigned int rulenum
= 0;
323 table_base
= private->entries
[smp_processor_id()];
324 root
= get_entry(table_base
, private->hook_entry
[hook
]);
326 hookname
= chainname
= hooknames
[hook
];
327 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
329 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
330 if (get_chainname_rulenum(iter
, e
, hookname
,
331 &chainname
, &comment
, &rulenum
) != 0)
334 nf_log_packet(AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
335 "TRACE: %s:%s:%s:%u ",
336 tablename
, chainname
, comment
, rulenum
);
340 static inline __pure
struct ip6t_entry
*
341 ip6t_next_entry(const struct ip6t_entry
*entry
)
343 return (void *)entry
+ entry
->next_offset
;
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
348 ip6t_do_table(struct sk_buff
*skb
,
350 const struct net_device
*in
,
351 const struct net_device
*out
,
352 struct xt_table
*table
)
354 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
356 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
357 bool hotdrop
= false;
358 /* Initializing verdict to NF_DROP keeps gcc happy. */
359 unsigned int verdict
= NF_DROP
;
360 const char *indev
, *outdev
;
361 const void *table_base
;
362 struct ip6t_entry
*e
, *back
;
363 const struct xt_table_info
*private;
364 struct xt_match_param mtpar
;
365 struct xt_target_param tgpar
;
368 indev
= in
? in
->name
: nulldevname
;
369 outdev
= out
? out
->name
: nulldevname
;
370 /* We handle fragments by dealing with the first fragment as
371 * if it was a normal packet. All other fragments are treated
372 * normally, except that they will NEVER match rules that ask
373 * things we don't know, ie. tcp syn flag or ports). If the
374 * rule is also a fragment-specific rule, non-fragments won't
376 mtpar
.hotdrop
= &hotdrop
;
377 mtpar
.in
= tgpar
.in
= in
;
378 mtpar
.out
= tgpar
.out
= out
;
379 mtpar
.family
= tgpar
.family
= NFPROTO_IPV6
;
380 mtpar
.hooknum
= tgpar
.hooknum
= hook
;
382 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
385 private = table
->private;
386 table_base
= private->entries
[smp_processor_id()];
388 e
= get_entry(table_base
, private->hook_entry
[hook
]);
390 /* For return from builtin chain */
391 back
= get_entry(table_base
, private->underflow
[hook
]);
394 const struct ip6t_entry_target
*t
;
395 const struct xt_entry_match
*ematch
;
399 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
400 &mtpar
.thoff
, &mtpar
.fragoff
, &hotdrop
)) {
402 e
= ip6t_next_entry(e
);
406 xt_ematch_foreach(ematch
, e
)
407 if (do_match(ematch
, skb
, &mtpar
) != 0)
410 ADD_COUNTER(e
->counters
,
411 ntohs(ipv6_hdr(skb
)->payload_len
) +
412 sizeof(struct ipv6hdr
), 1);
414 t
= ip6t_get_target_c(e
);
415 IP_NF_ASSERT(t
->u
.kernel
.target
);
417 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
418 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
419 /* The packet is traced: log it */
420 if (unlikely(skb
->nf_trace
))
421 trace_packet(skb
, hook
, in
, out
,
422 table
->name
, private, e
);
424 /* Standard target? */
425 if (!t
->u
.kernel
.target
->target
) {
428 v
= ((struct ip6t_standard_target
*)t
)->verdict
;
430 /* Pop from stack? */
431 if (v
!= IP6T_RETURN
) {
432 verdict
= (unsigned)(-v
) - 1;
436 back
= get_entry(table_base
, back
->comefrom
);
439 if (table_base
+ v
!= ip6t_next_entry(e
) &&
440 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
441 /* Save old back ptr in next entry */
442 struct ip6t_entry
*next
= ip6t_next_entry(e
);
443 next
->comefrom
= (void *)back
- table_base
;
444 /* set back pointer to next entry */
448 e
= get_entry(table_base
, v
);
452 /* Targets which reenter must return
454 tgpar
.target
= t
->u
.kernel
.target
;
455 tgpar
.targinfo
= t
->data
;
457 #ifdef CONFIG_NETFILTER_DEBUG
458 tb_comefrom
= 0xeeeeeeec;
460 verdict
= t
->u
.kernel
.target
->target(skb
, &tgpar
);
462 #ifdef CONFIG_NETFILTER_DEBUG
463 if (tb_comefrom
!= 0xeeeeeeec && verdict
== IP6T_CONTINUE
) {
464 printk("Target %s reentered!\n",
465 t
->u
.kernel
.target
->name
);
468 tb_comefrom
= 0x57acc001;
470 if (verdict
== IP6T_CONTINUE
)
471 e
= ip6t_next_entry(e
);
477 #ifdef CONFIG_NETFILTER_DEBUG
478 tb_comefrom
= NETFILTER_LINK_POISON
;
480 xt_info_rdunlock_bh();
482 #ifdef DEBUG_ALLOW_ALL
493 /* Figures out from what hook each rule can be called: returns 0 if
494 there are loops. Puts hook bitmask in comefrom. */
496 mark_source_chains(const struct xt_table_info
*newinfo
,
497 unsigned int valid_hooks
, void *entry0
)
501 /* No recursion; use packet counter to save back ptrs (reset
502 to 0 as we leave), and comefrom to save source hook bitmask */
503 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
504 unsigned int pos
= newinfo
->hook_entry
[hook
];
505 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
507 if (!(valid_hooks
& (1 << hook
)))
510 /* Set initial back pointer. */
511 e
->counters
.pcnt
= pos
;
514 const struct ip6t_standard_target
*t
515 = (void *)ip6t_get_target_c(e
);
516 int visited
= e
->comefrom
& (1 << hook
);
518 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
519 printk("iptables: loop hook %u pos %u %08X.\n",
520 hook
, pos
, e
->comefrom
);
523 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
525 /* Unconditional return/END. */
526 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
527 (strcmp(t
->target
.u
.user
.name
,
528 IP6T_STANDARD_TARGET
) == 0) &&
530 unconditional(&e
->ipv6
)) || visited
) {
531 unsigned int oldpos
, size
;
533 if ((strcmp(t
->target
.u
.user
.name
,
534 IP6T_STANDARD_TARGET
) == 0) &&
535 t
->verdict
< -NF_MAX_VERDICT
- 1) {
536 duprintf("mark_source_chains: bad "
537 "negative verdict (%i)\n",
542 /* Return: backtrack through the last
545 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
546 #ifdef DEBUG_IP_FIREWALL_USER
548 & (1 << NF_INET_NUMHOOKS
)) {
549 duprintf("Back unset "
556 pos
= e
->counters
.pcnt
;
557 e
->counters
.pcnt
= 0;
559 /* We're at the start. */
563 e
= (struct ip6t_entry
*)
565 } while (oldpos
== pos
+ e
->next_offset
);
568 size
= e
->next_offset
;
569 e
= (struct ip6t_entry
*)
570 (entry0
+ pos
+ size
);
571 e
->counters
.pcnt
= pos
;
574 int newpos
= t
->verdict
;
576 if (strcmp(t
->target
.u
.user
.name
,
577 IP6T_STANDARD_TARGET
) == 0 &&
579 if (newpos
> newinfo
->size
-
580 sizeof(struct ip6t_entry
)) {
581 duprintf("mark_source_chains: "
582 "bad verdict (%i)\n",
586 /* This a jump; chase it. */
587 duprintf("Jump rule %u -> %u\n",
590 /* ... this is a fallthru */
591 newpos
= pos
+ e
->next_offset
;
593 e
= (struct ip6t_entry
*)
595 e
->counters
.pcnt
= pos
;
600 duprintf("Finished chain %u\n", hook
);
605 static void cleanup_match(struct ip6t_entry_match
*m
, struct net
*net
)
607 struct xt_mtdtor_param par
;
610 par
.match
= m
->u
.kernel
.match
;
611 par
.matchinfo
= m
->data
;
612 par
.family
= NFPROTO_IPV6
;
613 if (par
.match
->destroy
!= NULL
)
614 par
.match
->destroy(&par
);
615 module_put(par
.match
->me
);
619 check_entry(const struct ip6t_entry
*e
, const char *name
)
621 const struct ip6t_entry_target
*t
;
623 if (!ip6_checkentry(&e
->ipv6
)) {
624 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
628 if (e
->target_offset
+ sizeof(struct ip6t_entry_target
) >
632 t
= ip6t_get_target_c(e
);
633 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
639 static int check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
)
641 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
644 par
->match
= m
->u
.kernel
.match
;
645 par
->matchinfo
= m
->data
;
647 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
648 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
650 duprintf("ip_tables: check failed for `%s'.\n",
658 find_check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
)
660 struct xt_match
*match
;
663 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
665 "ip6t_%s", m
->u
.user
.name
);
666 if (IS_ERR(match
) || !match
) {
667 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
668 return match
? PTR_ERR(match
) : -ENOENT
;
670 m
->u
.kernel
.match
= match
;
672 ret
= check_match(m
, par
);
678 module_put(m
->u
.kernel
.match
->me
);
682 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
684 struct ip6t_entry_target
*t
= ip6t_get_target(e
);
685 struct xt_tgchk_param par
= {
689 .target
= t
->u
.kernel
.target
,
691 .hook_mask
= e
->comefrom
,
692 .family
= NFPROTO_IPV6
,
696 t
= ip6t_get_target(e
);
697 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
698 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
700 duprintf("ip_tables: check failed for `%s'.\n",
701 t
->u
.kernel
.target
->name
);
708 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
711 struct ip6t_entry_target
*t
;
712 struct xt_target
*target
;
715 struct xt_mtchk_param mtpar
;
716 struct xt_entry_match
*ematch
;
718 ret
= check_entry(e
, name
);
725 mtpar
.entryinfo
= &e
->ipv6
;
726 mtpar
.hook_mask
= e
->comefrom
;
727 mtpar
.family
= NFPROTO_IPV6
;
728 xt_ematch_foreach(ematch
, e
) {
729 ret
= find_check_match(ematch
, &mtpar
);
731 goto cleanup_matches
;
735 t
= ip6t_get_target(e
);
736 target
= try_then_request_module(xt_find_target(AF_INET6
,
739 "ip6t_%s", t
->u
.user
.name
);
740 if (IS_ERR(target
) || !target
) {
741 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
742 ret
= target
? PTR_ERR(target
) : -ENOENT
;
743 goto cleanup_matches
;
745 t
->u
.kernel
.target
= target
;
747 ret
= check_target(e
, net
, name
);
752 module_put(t
->u
.kernel
.target
->me
);
754 xt_ematch_foreach(ematch
, e
) {
757 cleanup_match(ematch
, net
);
762 static bool check_underflow(const struct ip6t_entry
*e
)
764 const struct ip6t_entry_target
*t
;
765 unsigned int verdict
;
767 if (!unconditional(&e
->ipv6
))
769 t
= ip6t_get_target_c(e
);
770 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
772 verdict
= ((struct ip6t_standard_target
*)t
)->verdict
;
773 verdict
= -verdict
- 1;
774 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
778 check_entry_size_and_hooks(struct ip6t_entry
*e
,
779 struct xt_table_info
*newinfo
,
780 const unsigned char *base
,
781 const unsigned char *limit
,
782 const unsigned int *hook_entries
,
783 const unsigned int *underflows
,
784 unsigned int valid_hooks
)
788 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
789 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
790 duprintf("Bad offset %p\n", e
);
795 < sizeof(struct ip6t_entry
) + sizeof(struct ip6t_entry_target
)) {
796 duprintf("checking: element %p size %u\n",
801 /* Check hooks & underflows */
802 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
803 if (!(valid_hooks
& (1 << h
)))
805 if ((unsigned char *)e
- base
== hook_entries
[h
])
806 newinfo
->hook_entry
[h
] = hook_entries
[h
];
807 if ((unsigned char *)e
- base
== underflows
[h
]) {
808 if (!check_underflow(e
)) {
809 pr_err("Underflows must be unconditional and "
810 "use the STANDARD target with "
814 newinfo
->underflow
[h
] = underflows
[h
];
818 /* Clear counters and comefrom */
819 e
->counters
= ((struct xt_counters
) { 0, 0 });
824 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
826 struct xt_tgdtor_param par
;
827 struct ip6t_entry_target
*t
;
828 struct xt_entry_match
*ematch
;
830 /* Cleanup all matches */
831 xt_ematch_foreach(ematch
, e
)
832 cleanup_match(ematch
, net
);
833 t
= ip6t_get_target(e
);
836 par
.target
= t
->u
.kernel
.target
;
837 par
.targinfo
= t
->data
;
838 par
.family
= NFPROTO_IPV6
;
839 if (par
.target
->destroy
!= NULL
)
840 par
.target
->destroy(&par
);
841 module_put(par
.target
->me
);
844 /* Checks and translates the user-supplied table segment (held in
847 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
848 const struct ip6t_replace
*repl
)
850 struct ip6t_entry
*iter
;
854 newinfo
->size
= repl
->size
;
855 newinfo
->number
= repl
->num_entries
;
857 /* Init all hooks to impossible value. */
858 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
859 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
860 newinfo
->underflow
[i
] = 0xFFFFFFFF;
863 duprintf("translate_table: size %u\n", newinfo
->size
);
865 /* Walk through entries, checking offsets. */
866 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
867 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
877 if (i
!= repl
->num_entries
) {
878 duprintf("translate_table: %u not %u entries\n",
879 i
, repl
->num_entries
);
883 /* Check hooks all assigned */
884 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
885 /* Only hooks which are valid */
886 if (!(repl
->valid_hooks
& (1 << i
)))
888 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
889 duprintf("Invalid hook entry %u %u\n",
890 i
, repl
->hook_entry
[i
]);
893 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
894 duprintf("Invalid underflow %u %u\n",
895 i
, repl
->underflow
[i
]);
900 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
903 /* Finally, each sanity check must pass */
905 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
906 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
913 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
916 cleanup_entry(iter
, net
);
921 /* And one copy for every other CPU */
922 for_each_possible_cpu(i
) {
923 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
924 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
931 get_counters(const struct xt_table_info
*t
,
932 struct xt_counters counters
[])
934 struct ip6t_entry
*iter
;
939 /* Instead of clearing (by a previous call to memset())
940 * the counters and using adds, we set the counters
941 * with data used by 'current' CPU
943 * Bottom half has to be disabled to prevent deadlock
944 * if new softirq were to run and call ipt_do_table
947 curcpu
= smp_processor_id();
950 xt_entry_foreach(iter
, t
->entries
[curcpu
], t
->size
) {
951 SET_COUNTER(counters
[i
], iter
->counters
.bcnt
,
952 iter
->counters
.pcnt
);
956 for_each_possible_cpu(cpu
) {
961 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
962 ADD_COUNTER(counters
[i
], iter
->counters
.bcnt
,
963 iter
->counters
.pcnt
);
966 xt_info_wrunlock(cpu
);
971 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
973 unsigned int countersize
;
974 struct xt_counters
*counters
;
975 const struct xt_table_info
*private = table
->private;
977 /* We need atomic snapshot of counters: rest doesn't change
978 (other than comefrom, which userspace doesn't care
980 countersize
= sizeof(struct xt_counters
) * private->number
;
981 counters
= vmalloc_node(countersize
, numa_node_id());
983 if (counters
== NULL
)
984 return ERR_PTR(-ENOMEM
);
986 get_counters(private, counters
);
992 copy_entries_to_user(unsigned int total_size
,
993 const struct xt_table
*table
,
994 void __user
*userptr
)
996 unsigned int off
, num
;
997 const struct ip6t_entry
*e
;
998 struct xt_counters
*counters
;
999 const struct xt_table_info
*private = table
->private;
1001 const void *loc_cpu_entry
;
1003 counters
= alloc_counters(table
);
1004 if (IS_ERR(counters
))
1005 return PTR_ERR(counters
);
1007 /* choose the copy that is on our node/cpu, ...
1008 * This choice is lazy (because current thread is
1009 * allowed to migrate to another cpu)
1011 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1012 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
1017 /* FIXME: use iterator macros --RR */
1018 /* ... then go back and fix counters and names */
1019 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
1021 const struct ip6t_entry_match
*m
;
1022 const struct ip6t_entry_target
*t
;
1024 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
1025 if (copy_to_user(userptr
+ off
1026 + offsetof(struct ip6t_entry
, counters
),
1028 sizeof(counters
[num
])) != 0) {
1033 for (i
= sizeof(struct ip6t_entry
);
1034 i
< e
->target_offset
;
1035 i
+= m
->u
.match_size
) {
1038 if (copy_to_user(userptr
+ off
+ i
1039 + offsetof(struct ip6t_entry_match
,
1041 m
->u
.kernel
.match
->name
,
1042 strlen(m
->u
.kernel
.match
->name
)+1)
1049 t
= ip6t_get_target_c(e
);
1050 if (copy_to_user(userptr
+ off
+ e
->target_offset
1051 + offsetof(struct ip6t_entry_target
,
1053 t
->u
.kernel
.target
->name
,
1054 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1065 #ifdef CONFIG_COMPAT
1066 static void compat_standard_from_user(void *dst
, const void *src
)
1068 int v
= *(compat_int_t
*)src
;
1071 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1072 memcpy(dst
, &v
, sizeof(v
));
1075 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1077 compat_int_t cv
= *(int *)src
;
1080 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1081 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1084 static int compat_calc_entry(const struct ip6t_entry
*e
,
1085 const struct xt_table_info
*info
,
1086 const void *base
, struct xt_table_info
*newinfo
)
1088 const struct xt_entry_match
*ematch
;
1089 const struct ip6t_entry_target
*t
;
1090 unsigned int entry_offset
;
1093 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1094 entry_offset
= (void *)e
- base
;
1095 xt_ematch_foreach(ematch
, e
)
1096 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1097 t
= ip6t_get_target_c(e
);
1098 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1099 newinfo
->size
-= off
;
1100 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1104 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1105 if (info
->hook_entry
[i
] &&
1106 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1107 newinfo
->hook_entry
[i
] -= off
;
1108 if (info
->underflow
[i
] &&
1109 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1110 newinfo
->underflow
[i
] -= off
;
1115 static int compat_table_info(const struct xt_table_info
*info
,
1116 struct xt_table_info
*newinfo
)
1118 struct ip6t_entry
*iter
;
1119 void *loc_cpu_entry
;
1122 if (!newinfo
|| !info
)
1125 /* we dont care about newinfo->entries[] */
1126 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1127 newinfo
->initial_entries
= 0;
1128 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1129 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1130 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1138 static int get_info(struct net
*net
, void __user
*user
,
1139 const int *len
, int compat
)
1141 char name
[IP6T_TABLE_MAXNAMELEN
];
1145 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1146 duprintf("length %u != %zu\n", *len
,
1147 sizeof(struct ip6t_getinfo
));
1151 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1154 name
[IP6T_TABLE_MAXNAMELEN
-1] = '\0';
1155 #ifdef CONFIG_COMPAT
1157 xt_compat_lock(AF_INET6
);
1159 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1160 "ip6table_%s", name
);
1161 if (t
&& !IS_ERR(t
)) {
1162 struct ip6t_getinfo info
;
1163 const struct xt_table_info
*private = t
->private;
1164 #ifdef CONFIG_COMPAT
1165 struct xt_table_info tmp
;
1168 ret
= compat_table_info(private, &tmp
);
1169 xt_compat_flush_offsets(AF_INET6
);
1173 info
.valid_hooks
= t
->valid_hooks
;
1174 memcpy(info
.hook_entry
, private->hook_entry
,
1175 sizeof(info
.hook_entry
));
1176 memcpy(info
.underflow
, private->underflow
,
1177 sizeof(info
.underflow
));
1178 info
.num_entries
= private->number
;
1179 info
.size
= private->size
;
1180 strcpy(info
.name
, name
);
1182 if (copy_to_user(user
, &info
, *len
) != 0)
1190 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1191 #ifdef CONFIG_COMPAT
1193 xt_compat_unlock(AF_INET6
);
1199 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1203 struct ip6t_get_entries get
;
1206 if (*len
< sizeof(get
)) {
1207 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1210 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1212 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1213 duprintf("get_entries: %u != %zu\n",
1214 *len
, sizeof(get
) + get
.size
);
1218 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1219 if (t
&& !IS_ERR(t
)) {
1220 struct xt_table_info
*private = t
->private;
1221 duprintf("t->private->number = %u\n", private->number
);
1222 if (get
.size
== private->size
)
1223 ret
= copy_entries_to_user(private->size
,
1224 t
, uptr
->entrytable
);
1226 duprintf("get_entries: I've got %u not %u!\n",
1227 private->size
, get
.size
);
1233 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1239 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1240 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1241 void __user
*counters_ptr
)
1245 struct xt_table_info
*oldinfo
;
1246 struct xt_counters
*counters
;
1247 const void *loc_cpu_old_entry
;
1248 struct ip6t_entry
*iter
;
1251 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
1258 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1259 "ip6table_%s", name
);
1260 if (!t
|| IS_ERR(t
)) {
1261 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1262 goto free_newinfo_counters_untrans
;
1266 if (valid_hooks
!= t
->valid_hooks
) {
1267 duprintf("Valid hook crap: %08X vs %08X\n",
1268 valid_hooks
, t
->valid_hooks
);
1273 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1277 /* Update module usage count based on number of rules */
1278 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1279 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1280 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1281 (newinfo
->number
<= oldinfo
->initial_entries
))
1283 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1284 (newinfo
->number
<= oldinfo
->initial_entries
))
1287 /* Get the old counters, and synchronize with replace */
1288 get_counters(oldinfo
, counters
);
1290 /* Decrease module usage counts and free resource */
1291 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1292 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1293 cleanup_entry(iter
, net
);
1295 xt_free_table_info(oldinfo
);
1296 if (copy_to_user(counters_ptr
, counters
,
1297 sizeof(struct xt_counters
) * num_counters
) != 0)
1306 free_newinfo_counters_untrans
:
1313 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1316 struct ip6t_replace tmp
;
1317 struct xt_table_info
*newinfo
;
1318 void *loc_cpu_entry
;
1319 struct ip6t_entry
*iter
;
1321 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1324 /* overflow check */
1325 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1328 newinfo
= xt_alloc_table_info(tmp
.size
);
1332 /* choose the copy that is on our node/cpu */
1333 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1334 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1340 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1344 duprintf("ip_tables: Translated table\n");
1346 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1347 tmp
.num_counters
, tmp
.counters
);
1349 goto free_newinfo_untrans
;
1352 free_newinfo_untrans
:
1353 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1354 cleanup_entry(iter
, net
);
1356 xt_free_table_info(newinfo
);
1361 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1364 unsigned int i
, curcpu
;
1365 struct xt_counters_info tmp
;
1366 struct xt_counters
*paddc
;
1367 unsigned int num_counters
;
1372 const struct xt_table_info
*private;
1374 const void *loc_cpu_entry
;
1375 struct ip6t_entry
*iter
;
1376 #ifdef CONFIG_COMPAT
1377 struct compat_xt_counters_info compat_tmp
;
1381 size
= sizeof(struct compat_xt_counters_info
);
1386 size
= sizeof(struct xt_counters_info
);
1389 if (copy_from_user(ptmp
, user
, size
) != 0)
1392 #ifdef CONFIG_COMPAT
1394 num_counters
= compat_tmp
.num_counters
;
1395 name
= compat_tmp
.name
;
1399 num_counters
= tmp
.num_counters
;
1403 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1406 paddc
= vmalloc_node(len
- size
, numa_node_id());
1410 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1415 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1416 if (!t
|| IS_ERR(t
)) {
1417 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1423 private = t
->private;
1424 if (private->number
!= num_counters
) {
1426 goto unlock_up_free
;
1430 /* Choose the copy that is on our node */
1431 curcpu
= smp_processor_id();
1432 xt_info_wrlock(curcpu
);
1433 loc_cpu_entry
= private->entries
[curcpu
];
1434 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1435 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1438 xt_info_wrunlock(curcpu
);
1450 #ifdef CONFIG_COMPAT
1451 struct compat_ip6t_replace
{
1452 char name
[IP6T_TABLE_MAXNAMELEN
];
1456 u32 hook_entry
[NF_INET_NUMHOOKS
];
1457 u32 underflow
[NF_INET_NUMHOOKS
];
1459 compat_uptr_t counters
; /* struct ip6t_counters * */
1460 struct compat_ip6t_entry entries
[0];
1464 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1465 unsigned int *size
, struct xt_counters
*counters
,
1468 struct ip6t_entry_target
*t
;
1469 struct compat_ip6t_entry __user
*ce
;
1470 u_int16_t target_offset
, next_offset
;
1471 compat_uint_t origsize
;
1472 const struct xt_entry_match
*ematch
;
1476 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1477 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1478 copy_to_user(&ce
->counters
, &counters
[i
],
1479 sizeof(counters
[i
])) != 0)
1482 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1483 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1485 xt_ematch_foreach(ematch
, e
) {
1486 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1490 target_offset
= e
->target_offset
- (origsize
- *size
);
1491 t
= ip6t_get_target(e
);
1492 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1495 next_offset
= e
->next_offset
- (origsize
- *size
);
1496 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1497 put_user(next_offset
, &ce
->next_offset
) != 0)
1503 compat_find_calc_match(struct ip6t_entry_match
*m
,
1505 const struct ip6t_ip6
*ipv6
,
1506 unsigned int hookmask
,
1509 struct xt_match
*match
;
1511 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
1512 m
->u
.user
.revision
),
1513 "ip6t_%s", m
->u
.user
.name
);
1514 if (IS_ERR(match
) || !match
) {
1515 duprintf("compat_check_calc_match: `%s' not found\n",
1517 return match
? PTR_ERR(match
) : -ENOENT
;
1519 m
->u
.kernel
.match
= match
;
1520 *size
+= xt_compat_match_offset(match
);
1524 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1526 struct ip6t_entry_target
*t
;
1527 struct xt_entry_match
*ematch
;
1529 /* Cleanup all matches */
1530 xt_ematch_foreach(ematch
, e
)
1531 module_put(ematch
->u
.kernel
.match
->me
);
1532 t
= compat_ip6t_get_target(e
);
1533 module_put(t
->u
.kernel
.target
->me
);
1537 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1538 struct xt_table_info
*newinfo
,
1540 const unsigned char *base
,
1541 const unsigned char *limit
,
1542 const unsigned int *hook_entries
,
1543 const unsigned int *underflows
,
1546 struct xt_entry_match
*ematch
;
1547 struct ip6t_entry_target
*t
;
1548 struct xt_target
*target
;
1549 unsigned int entry_offset
;
1553 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1554 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1555 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1556 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1560 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1561 sizeof(struct compat_xt_entry_target
)) {
1562 duprintf("checking: element %p size %u\n",
1567 /* For purposes of check_entry casting the compat entry is fine */
1568 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1572 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1573 entry_offset
= (void *)e
- (void *)base
;
1575 xt_ematch_foreach(ematch
, e
) {
1576 ret
= compat_find_calc_match(ematch
, name
,
1577 &e
->ipv6
, e
->comefrom
, &off
);
1579 goto release_matches
;
1583 t
= compat_ip6t_get_target(e
);
1584 target
= try_then_request_module(xt_find_target(AF_INET6
,
1586 t
->u
.user
.revision
),
1587 "ip6t_%s", t
->u
.user
.name
);
1588 if (IS_ERR(target
) || !target
) {
1589 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1591 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1592 goto release_matches
;
1594 t
->u
.kernel
.target
= target
;
1596 off
+= xt_compat_target_offset(target
);
1598 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1602 /* Check hooks & underflows */
1603 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1604 if ((unsigned char *)e
- base
== hook_entries
[h
])
1605 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1606 if ((unsigned char *)e
- base
== underflows
[h
])
1607 newinfo
->underflow
[h
] = underflows
[h
];
1610 /* Clear counters and comefrom */
1611 memset(&e
->counters
, 0, sizeof(e
->counters
));
1616 module_put(t
->u
.kernel
.target
->me
);
1618 xt_ematch_foreach(ematch
, e
) {
1621 module_put(ematch
->u
.kernel
.match
->me
);
1627 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1628 unsigned int *size
, const char *name
,
1629 struct xt_table_info
*newinfo
, unsigned char *base
)
1631 struct ip6t_entry_target
*t
;
1632 struct xt_target
*target
;
1633 struct ip6t_entry
*de
;
1634 unsigned int origsize
;
1636 struct xt_entry_match
*ematch
;
1640 de
= (struct ip6t_entry
*)*dstptr
;
1641 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1642 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1644 *dstptr
+= sizeof(struct ip6t_entry
);
1645 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1647 xt_ematch_foreach(ematch
, e
) {
1648 ret
= xt_compat_match_from_user(ematch
, dstptr
, size
);
1652 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1653 t
= compat_ip6t_get_target(e
);
1654 target
= t
->u
.kernel
.target
;
1655 xt_compat_target_from_user(t
, dstptr
, size
);
1657 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1658 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1659 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1660 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1661 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1662 newinfo
->underflow
[h
] -= origsize
- *size
;
1667 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1672 struct xt_mtchk_param mtpar
;
1673 struct xt_entry_match
*ematch
;
1678 mtpar
.entryinfo
= &e
->ipv6
;
1679 mtpar
.hook_mask
= e
->comefrom
;
1680 mtpar
.family
= NFPROTO_IPV6
;
1681 xt_ematch_foreach(ematch
, e
) {
1682 ret
= check_match(ematch
, &mtpar
);
1684 goto cleanup_matches
;
1688 ret
= check_target(e
, net
, name
);
1690 goto cleanup_matches
;
1694 xt_ematch_foreach(ematch
, e
) {
1697 cleanup_match(ematch
, net
);
1703 translate_compat_table(struct net
*net
,
1705 unsigned int valid_hooks
,
1706 struct xt_table_info
**pinfo
,
1708 unsigned int total_size
,
1709 unsigned int number
,
1710 unsigned int *hook_entries
,
1711 unsigned int *underflows
)
1714 struct xt_table_info
*newinfo
, *info
;
1715 void *pos
, *entry0
, *entry1
;
1716 struct compat_ip6t_entry
*iter0
;
1717 struct ip6t_entry
*iter1
;
1724 info
->number
= number
;
1726 /* Init all hooks to impossible value. */
1727 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1728 info
->hook_entry
[i
] = 0xFFFFFFFF;
1729 info
->underflow
[i
] = 0xFFFFFFFF;
1732 duprintf("translate_compat_table: size %u\n", info
->size
);
1734 xt_compat_lock(AF_INET6
);
1735 /* Walk through entries, checking offsets. */
1736 xt_entry_foreach(iter0
, entry0
, total_size
) {
1737 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1739 entry0
+ total_size
,
1750 duprintf("translate_compat_table: %u not %u entries\n",
1755 /* Check hooks all assigned */
1756 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1757 /* Only hooks which are valid */
1758 if (!(valid_hooks
& (1 << i
)))
1760 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1761 duprintf("Invalid hook entry %u %u\n",
1762 i
, hook_entries
[i
]);
1765 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1766 duprintf("Invalid underflow %u %u\n",
1773 newinfo
= xt_alloc_table_info(size
);
1777 newinfo
->number
= number
;
1778 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1779 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1780 newinfo
->underflow
[i
] = info
->underflow
[i
];
1782 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1785 xt_entry_foreach(iter0
, entry0
, total_size
) {
1786 ret
= compat_copy_entry_from_user(iter0
, &pos
, &size
,
1787 name
, newinfo
, entry1
);
1791 xt_compat_flush_offsets(AF_INET6
);
1792 xt_compat_unlock(AF_INET6
);
1797 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1801 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1802 ret
= compat_check_entry(iter1
, net
, name
);
1809 * The first i matches need cleanup_entry (calls ->destroy)
1810 * because they had called ->check already. The other j-i
1811 * entries need only release.
1815 xt_entry_foreach(iter0
, entry0
, newinfo
->size
) {
1820 compat_release_entry(iter0
);
1822 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1825 cleanup_entry(iter1
, net
);
1827 xt_free_table_info(newinfo
);
1831 /* And one copy for every other CPU */
1832 for_each_possible_cpu(i
)
1833 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1834 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1838 xt_free_table_info(info
);
1842 xt_free_table_info(newinfo
);
1844 xt_entry_foreach(iter0
, entry0
, total_size
) {
1847 compat_release_entry(iter0
);
1851 xt_compat_flush_offsets(AF_INET6
);
1852 xt_compat_unlock(AF_INET6
);
1857 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1860 struct compat_ip6t_replace tmp
;
1861 struct xt_table_info
*newinfo
;
1862 void *loc_cpu_entry
;
1863 struct ip6t_entry
*iter
;
1865 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1868 /* overflow check */
1869 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1871 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1874 newinfo
= xt_alloc_table_info(tmp
.size
);
1878 /* choose the copy that is on our node/cpu */
1879 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1880 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1886 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1887 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1888 tmp
.num_entries
, tmp
.hook_entry
,
1893 duprintf("compat_do_replace: Translated table\n");
1895 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1896 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1898 goto free_newinfo_untrans
;
1901 free_newinfo_untrans
:
1902 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1903 cleanup_entry(iter
, net
);
1905 xt_free_table_info(newinfo
);
1910 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1915 if (!capable(CAP_NET_ADMIN
))
1919 case IP6T_SO_SET_REPLACE
:
1920 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1923 case IP6T_SO_SET_ADD_COUNTERS
:
1924 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1928 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1935 struct compat_ip6t_get_entries
{
1936 char name
[IP6T_TABLE_MAXNAMELEN
];
1938 struct compat_ip6t_entry entrytable
[0];
1942 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1943 void __user
*userptr
)
1945 struct xt_counters
*counters
;
1946 const struct xt_table_info
*private = table
->private;
1950 const void *loc_cpu_entry
;
1952 struct ip6t_entry
*iter
;
1954 counters
= alloc_counters(table
);
1955 if (IS_ERR(counters
))
1956 return PTR_ERR(counters
);
1958 /* choose the copy that is on our node/cpu, ...
1959 * This choice is lazy (because current thread is
1960 * allowed to migrate to another cpu)
1962 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1965 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1966 ret
= compat_copy_entry_to_user(iter
, &pos
,
1967 &size
, counters
, i
++);
1977 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1981 struct compat_ip6t_get_entries get
;
1984 if (*len
< sizeof(get
)) {
1985 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1989 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1992 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1993 duprintf("compat_get_entries: %u != %zu\n",
1994 *len
, sizeof(get
) + get
.size
);
1998 xt_compat_lock(AF_INET6
);
1999 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
2000 if (t
&& !IS_ERR(t
)) {
2001 const struct xt_table_info
*private = t
->private;
2002 struct xt_table_info info
;
2003 duprintf("t->private->number = %u\n", private->number
);
2004 ret
= compat_table_info(private, &info
);
2005 if (!ret
&& get
.size
== info
.size
) {
2006 ret
= compat_copy_entries_to_user(private->size
,
2007 t
, uptr
->entrytable
);
2009 duprintf("compat_get_entries: I've got %u not %u!\n",
2010 private->size
, get
.size
);
2013 xt_compat_flush_offsets(AF_INET6
);
2017 ret
= t
? PTR_ERR(t
) : -ENOENT
;
2019 xt_compat_unlock(AF_INET6
);
2023 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
2026 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2030 if (!capable(CAP_NET_ADMIN
))
2034 case IP6T_SO_GET_INFO
:
2035 ret
= get_info(sock_net(sk
), user
, len
, 1);
2037 case IP6T_SO_GET_ENTRIES
:
2038 ret
= compat_get_entries(sock_net(sk
), user
, len
);
2041 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2048 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2052 if (!capable(CAP_NET_ADMIN
))
2056 case IP6T_SO_SET_REPLACE
:
2057 ret
= do_replace(sock_net(sk
), user
, len
);
2060 case IP6T_SO_SET_ADD_COUNTERS
:
2061 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2065 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2073 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2077 if (!capable(CAP_NET_ADMIN
))
2081 case IP6T_SO_GET_INFO
:
2082 ret
= get_info(sock_net(sk
), user
, len
, 0);
2085 case IP6T_SO_GET_ENTRIES
:
2086 ret
= get_entries(sock_net(sk
), user
, len
);
2089 case IP6T_SO_GET_REVISION_MATCH
:
2090 case IP6T_SO_GET_REVISION_TARGET
: {
2091 struct ip6t_get_revision rev
;
2094 if (*len
!= sizeof(rev
)) {
2098 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2103 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2108 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2111 "ip6t_%s", rev
.name
);
2116 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2123 struct xt_table
*ip6t_register_table(struct net
*net
,
2124 const struct xt_table
*table
,
2125 const struct ip6t_replace
*repl
)
2128 struct xt_table_info
*newinfo
;
2129 struct xt_table_info bootstrap
2130 = { 0, 0, 0, { 0 }, { 0 }, { } };
2131 void *loc_cpu_entry
;
2132 struct xt_table
*new_table
;
2134 newinfo
= xt_alloc_table_info(repl
->size
);
2140 /* choose the copy on our node/cpu, but dont care about preemption */
2141 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2142 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2144 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
2148 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2149 if (IS_ERR(new_table
)) {
2150 ret
= PTR_ERR(new_table
);
2156 xt_free_table_info(newinfo
);
2158 return ERR_PTR(ret
);
2161 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2163 struct xt_table_info
*private;
2164 void *loc_cpu_entry
;
2165 struct module
*table_owner
= table
->me
;
2166 struct ip6t_entry
*iter
;
2168 private = xt_unregister_table(table
);
2170 /* Decrease module usage counts and free resources */
2171 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2172 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2173 cleanup_entry(iter
, net
);
2174 if (private->number
> private->initial_entries
)
2175 module_put(table_owner
);
2176 xt_free_table_info(private);
2179 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2181 icmp6_type_code_match(u_int8_t test_type
, u_int8_t min_code
, u_int8_t max_code
,
2182 u_int8_t type
, u_int8_t code
,
2185 return (type
== test_type
&& code
>= min_code
&& code
<= max_code
)
2190 icmp6_match(const struct sk_buff
*skb
, const struct xt_match_param
*par
)
2192 const struct icmp6hdr
*ic
;
2193 struct icmp6hdr _icmph
;
2194 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2196 /* Must not be a fragment. */
2197 if (par
->fragoff
!= 0)
2200 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2202 /* We've been asked to examine this packet, and we
2203 * can't. Hence, no choice but to drop.
2205 duprintf("Dropping evil ICMP tinygram.\n");
2206 *par
->hotdrop
= true;
2210 return icmp6_type_code_match(icmpinfo
->type
,
2213 ic
->icmp6_type
, ic
->icmp6_code
,
2214 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2217 /* Called when user tries to insert an entry of this type. */
2218 static bool icmp6_checkentry(const struct xt_mtchk_param
*par
)
2220 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2222 /* Must specify no unknown invflags */
2223 return !(icmpinfo
->invflags
& ~IP6T_ICMP_INV
);
2226 /* The built-in targets: standard (NULL) and error. */
2227 static struct xt_target ip6t_standard_target __read_mostly
= {
2228 .name
= IP6T_STANDARD_TARGET
,
2229 .targetsize
= sizeof(int),
2230 .family
= NFPROTO_IPV6
,
2231 #ifdef CONFIG_COMPAT
2232 .compatsize
= sizeof(compat_int_t
),
2233 .compat_from_user
= compat_standard_from_user
,
2234 .compat_to_user
= compat_standard_to_user
,
2238 static struct xt_target ip6t_error_target __read_mostly
= {
2239 .name
= IP6T_ERROR_TARGET
,
2240 .target
= ip6t_error
,
2241 .targetsize
= IP6T_FUNCTION_MAXNAMELEN
,
2242 .family
= NFPROTO_IPV6
,
2245 static struct nf_sockopt_ops ip6t_sockopts
= {
2247 .set_optmin
= IP6T_BASE_CTL
,
2248 .set_optmax
= IP6T_SO_SET_MAX
+1,
2249 .set
= do_ip6t_set_ctl
,
2250 #ifdef CONFIG_COMPAT
2251 .compat_set
= compat_do_ip6t_set_ctl
,
2253 .get_optmin
= IP6T_BASE_CTL
,
2254 .get_optmax
= IP6T_SO_GET_MAX
+1,
2255 .get
= do_ip6t_get_ctl
,
2256 #ifdef CONFIG_COMPAT
2257 .compat_get
= compat_do_ip6t_get_ctl
,
2259 .owner
= THIS_MODULE
,
2262 static struct xt_match icmp6_matchstruct __read_mostly
= {
2264 .match
= icmp6_match
,
2265 .matchsize
= sizeof(struct ip6t_icmp
),
2266 .checkentry
= icmp6_checkentry
,
2267 .proto
= IPPROTO_ICMPV6
,
2268 .family
= NFPROTO_IPV6
,
2271 static int __net_init
ip6_tables_net_init(struct net
*net
)
2273 return xt_proto_init(net
, NFPROTO_IPV6
);
2276 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2278 xt_proto_fini(net
, NFPROTO_IPV6
);
2281 static struct pernet_operations ip6_tables_net_ops
= {
2282 .init
= ip6_tables_net_init
,
2283 .exit
= ip6_tables_net_exit
,
2286 static int __init
ip6_tables_init(void)
2290 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2294 /* Noone else will be downing sem now, so we won't sleep */
2295 ret
= xt_register_target(&ip6t_standard_target
);
2298 ret
= xt_register_target(&ip6t_error_target
);
2301 ret
= xt_register_match(&icmp6_matchstruct
);
2305 /* Register setsockopt */
2306 ret
= nf_register_sockopt(&ip6t_sockopts
);
2310 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2314 xt_unregister_match(&icmp6_matchstruct
);
2316 xt_unregister_target(&ip6t_error_target
);
2318 xt_unregister_target(&ip6t_standard_target
);
2320 unregister_pernet_subsys(&ip6_tables_net_ops
);
2325 static void __exit
ip6_tables_fini(void)
2327 nf_unregister_sockopt(&ip6t_sockopts
);
2329 xt_unregister_match(&icmp6_matchstruct
);
2330 xt_unregister_target(&ip6t_error_target
);
2331 xt_unregister_target(&ip6t_standard_target
);
2333 unregister_pernet_subsys(&ip6_tables_net_ops
);
2337 * find the offset to specified header or the protocol number of last header
2338 * if target < 0. "last header" is transport protocol header, ESP, or
2341 * If target header is found, its offset is set in *offset and return protocol
2342 * number. Otherwise, return -1.
2344 * If the first fragment doesn't contain the final protocol header or
2345 * NEXTHDR_NONE it is considered invalid.
2347 * Note that non-1st fragment is special case that "the protocol number
2348 * of last header" is "next header" field in Fragment header. In this case,
2349 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2353 int ipv6_find_hdr(const struct sk_buff
*skb
, unsigned int *offset
,
2354 int target
, unsigned short *fragoff
)
2356 unsigned int start
= skb_network_offset(skb
) + sizeof(struct ipv6hdr
);
2357 u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
2358 unsigned int len
= skb
->len
- start
;
2363 while (nexthdr
!= target
) {
2364 struct ipv6_opt_hdr _hdr
, *hp
;
2365 unsigned int hdrlen
;
2367 if ((!ipv6_ext_hdr(nexthdr
)) || nexthdr
== NEXTHDR_NONE
) {
2373 hp
= skb_header_pointer(skb
, start
, sizeof(_hdr
), &_hdr
);
2376 if (nexthdr
== NEXTHDR_FRAGMENT
) {
2377 unsigned short _frag_off
;
2379 fp
= skb_header_pointer(skb
,
2380 start
+offsetof(struct frag_hdr
,
2387 _frag_off
= ntohs(*fp
) & ~0x7;
2390 ((!ipv6_ext_hdr(hp
->nexthdr
)) ||
2391 hp
->nexthdr
== NEXTHDR_NONE
)) {
2393 *fragoff
= _frag_off
;
2399 } else if (nexthdr
== NEXTHDR_AUTH
)
2400 hdrlen
= (hp
->hdrlen
+ 2) << 2;
2402 hdrlen
= ipv6_optlen(hp
);
2404 nexthdr
= hp
->nexthdr
;
2413 EXPORT_SYMBOL(ip6t_register_table
);
2414 EXPORT_SYMBOL(ip6t_unregister_table
);
2415 EXPORT_SYMBOL(ip6t_do_table
);
2416 EXPORT_SYMBOL(ip6t_ext_hdr
);
2417 EXPORT_SYMBOL(ipv6_find_hdr
);
2419 module_init(ip6_tables_init
);
2420 module_exit(ip6_tables_fini
);