netfilter: xtables: replace XT_ENTRY_ITERATE macro
[linux-2.6/libata-dev.git] / net / ipv6 / netfilter / ip6_tables.c
blob23926e38d36bf6991a7f8ea6f4b0a7988cf98e88
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debug switches; all disabled in production builds. */
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf(): packet-path debug output; compiles to nothing unless
 * DEBUG_IP_FIREWALL is defined above. */
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
/* duprintf(): userspace-interface (ruleset load/validate) debug output,
 * gated by DEBUG_IP_FIREWALL_USER. */
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
/* IP_NF_ASSERT(): log (but do not crash) when an internal invariant
 * fails; a no-op unless CONFIG_NETFILTER_DEBUG is enabled. */
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
56 do { \
57 if (!(x)) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
60 } while(0)
61 #else
62 #define IP_NF_ASSERT(x)
63 #endif
/* Debug aid: defining away 'static'/'inline' makes every symbol visible
 * to a debugger. Kept disabled via '#if 0'. */
65 #if 0
66 /* All the better to debug you with... */
67 #define static
68 #define inline
69 #endif
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 /* We keep a set of rules for each CPU, so we can avoid write-locking
79    them in the softirq when updating the counters and therefore
80    only need to read-lock in the softirq; doing a write_lock_bh() in user
81    context stops packets coming through and allows user context to read
82    the counters or update the rules.
84    Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
87 int
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
/*
 * Match skb against the address/interface/protocol portion of one rule.
 * On a protocol match, *protoff receives the transport-header offset and
 * *fragoff the fragment offset found by ipv6_find_hdr(); *hotdrop is set
 * when the packet must be dropped outright (unparseable non-fragment).
 */
101 static inline bool
102 ip6_packet_match(const struct sk_buff *skb,
103 const char *indev,
104 const char *outdev,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
109 unsigned long ret;
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* XOR the raw comparison result with the rule's inversion flag.
 * (The parameter named 'bool' shadows the C99 type - historic wart.) */
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
/* NOTE(review): the dprintf block below is commented-out upstream debug
   code (it still references IPv4 names like ip->saddr); the opening
   comment marker was lost in extraction and is restored by this line.
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
126 return false;
/* Interface comparison honours the rule's per-byte wildcard mask. */
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
135 return false;
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
144 return false;
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
151 int protohdr;
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
155 if (protohdr < 0) {
/* Header chain unparseable: hot-drop unless this is a later fragment. */
156 if (_frag_off == 0)
157 *hotdrop = true;
158 return false;
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
163 protohdr,
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
165 ip6info->proto);
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
169 return false;
171 return true;
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
177 return false;
179 return true;
182 /* should be ip6 safe */
183 static bool
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
189 return false;
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
194 return false;
196 return true;
199 static unsigned int
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
202 if (net_ratelimit())
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
206 return NF_DROP;
209 /* Performance critical - called for every packet */
210 static inline bool
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
219 return true;
220 else
221 return false;
/* Return the entry located 'offset' bytes into the table blob 'base'.
 * (Uses the kernel's void-pointer arithmetic GCC extension.) */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* Const-correct wrapper: ip6t_get_target() takes a non-const entry, so
 * cast constness away for the call and hand back a const pointer. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
/* Human-readable hook names used in TRACE log lines. */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Classifies what kind of rule produced a trace entry. */
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Fixed loginfo for nf_log_packet(): syslog level 4, all log flags on. */
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
270 .u = {
271 .log = {
272 .level = 4,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): invoked for each rule 's' until the
 * traced rule 'e' is found.  *chainname is reset at every ERROR target
 * that heads a user chain, *rulenum counts rules within that chain, and
 * *comment distinguishes rule / return / policy.
 * Returns 1 to stop the walk, 0 to continue.
 */
279 static inline int
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
289 (*rulenum) = 0;
290 } else if (s == e) {
291 (*rulenum)++;
/* Found the traced rule; decide whether it is a chain tail. */
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
296 t->verdict < 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
303 return 1;
304 } else
305 (*rulenum)++;
307 return 0;
/*
 * Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that hit rule 'e' with skb->nf_trace set.  Walks this CPU's copy of
 * the table from the hook entry point to recover chain name and rule
 * number for the traced rule.
 */
310 static void trace_packet(const struct sk_buff *skb,
311 unsigned int hook,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
333 break;
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
339 #endif
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
344 return (void *)entry + entry->next_offset;
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main per-packet rule-traversal engine.  Walks this CPU's copy of
 * 'table' starting at the entry point for 'hook', evaluating matches and
 * targets until an absolute verdict is reached.  'back' implements the
 * return stack for user-defined chains, threading saved positions
 * through the comefrom field of the entries themselves.
 */
348 unsigned int
349 ip6t_do_table(struct sk_buff *skb,
350 unsigned int hook,
351 const struct net_device *in,
352 const struct net_device *out,
353 struct xt_table *table)
/* Debug alias for the comefrom slot of the entry at table_base; used to
 * detect targets that illegally re-enter the table. */
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 bool hotdrop = false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict = NF_DROP;
361 const char *indev, *outdev;
362 const void *table_base;
363 struct ip6t_entry *e, *back;
364 const struct xt_table_info *private;
365 struct xt_match_param mtpar;
366 struct xt_target_param tgpar;
368 /* Initialization */
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
376 * match it. */
377 mtpar.hotdrop = &hotdrop;
378 mtpar.in = tgpar.in = in;
379 mtpar.out = tgpar.out = out;
380 mtpar.family = tgpar.family = NFPROTO_IPV6;
381 mtpar.hooknum = tgpar.hooknum = hook;
383 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Recursive-read lock; counters are updated under it on this CPU. */
385 xt_info_rdlock_bh();
386 private = table->private;
387 table_base = private->entries[smp_processor_id()];
389 e = get_entry(table_base, private->hook_entry[hook]);
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
394 do {
395 const struct ip6t_entry_target *t;
397 IP_NF_ASSERT(e);
398 IP_NF_ASSERT(back);
/* No match: fall through to the next rule. */
399 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
400 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
401 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
402 e = ip6t_next_entry(e);
403 continue;
406 ADD_COUNTER(e->counters,
407 ntohs(ipv6_hdr(skb)->payload_len) +
408 sizeof(struct ipv6hdr), 1);
410 t = ip6t_get_target_c(e);
411 IP_NF_ASSERT(t->u.kernel.target);
413 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
414 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
415 /* The packet is traced: log it */
416 if (unlikely(skb->nf_trace))
417 trace_packet(skb, hook, in, out,
418 table->name, private, e);
419 #endif
420 /* Standard target? */
421 if (!t->u.kernel.target->target) {
422 int v;
424 v = ((struct ip6t_standard_target *)t)->verdict;
425 if (v < 0) {
426 /* Pop from stack? */
427 if (v != IP6T_RETURN) {
/* Absolute verdict encoded as -(verdict+1). */
428 verdict = (unsigned)(-v) - 1;
429 break;
431 e = back;
432 back = get_entry(table_base, back->comefrom);
433 continue;
/* Jump (not goto, not mere fallthrough): push return address. */
435 if (table_base + v != ip6t_next_entry(e) &&
436 !(e->ipv6.flags & IP6T_F_GOTO)) {
437 /* Save old back ptr in next entry */
438 struct ip6t_entry *next = ip6t_next_entry(e);
439 next->comefrom = (void *)back - table_base;
440 /* set back pointer to next entry */
441 back = next;
444 e = get_entry(table_base, v);
445 continue;
448 /* Targets which reenter must return
449 abs. verdicts */
450 tgpar.target = t->u.kernel.target;
451 tgpar.targinfo = t->data;
453 #ifdef CONFIG_NETFILTER_DEBUG
454 tb_comefrom = 0xeeeeeeec;
455 #endif
456 verdict = t->u.kernel.target->target(skb, &tgpar);
458 #ifdef CONFIG_NETFILTER_DEBUG
/* Sentinel overwritten means the target re-entered the table. */
459 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
460 printk("Target %s reentered!\n",
461 t->u.kernel.target->name);
462 verdict = NF_DROP;
464 tb_comefrom = 0x57acc001;
465 #endif
466 if (verdict == IP6T_CONTINUE)
467 e = ip6t_next_entry(e);
468 else
469 /* Verdict */
470 break;
471 } while (!hotdrop);
473 #ifdef CONFIG_NETFILTER_DEBUG
474 tb_comefrom = NETFILTER_LINK_POISON;
475 #endif
476 xt_info_rdunlock_bh();
478 #ifdef DEBUG_ALLOW_ALL
479 return NF_ACCEPT;
480 #else
481 if (hotdrop)
482 return NF_DROP;
483 else return verdict;
484 #endif
486 #undef tb_comefrom
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
/*
 * Iterative depth-first walk over every chain reachable from each hook
 * entry point.  e->counters.pcnt is borrowed as the back-pointer stack
 * (restored to 0 on the way back out) and bit NF_INET_NUMHOOKS in
 * e->comefrom marks "entry is on the current path" for loop detection.
 */
491 static int
492 mark_source_chains(const struct xt_table_info *newinfo,
493 unsigned int valid_hooks, void *entry0)
495 unsigned int hook;
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
500 unsigned int pos = newinfo->hook_entry[hook];
501 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
503 if (!(valid_hooks & (1 << hook)))
504 continue;
506 /* Set initial back pointer. */
507 e->counters.pcnt = pos;
509 for (;;) {
510 const struct ip6t_standard_target *t
511 = (void *)ip6t_get_target_c(e);
512 int visited = e->comefrom & (1 << hook);
/* Revisiting an entry still on the current path => cycle. */
514 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook, pos, e->comefrom);
517 return 0;
519 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
521 /* Unconditional return/END. */
522 if ((e->target_offset == sizeof(struct ip6t_entry) &&
523 (strcmp(t->target.u.user.name,
524 IP6T_STANDARD_TARGET) == 0) &&
525 t->verdict < 0 &&
526 unconditional(&e->ipv6)) || visited) {
527 unsigned int oldpos, size;
529 if ((strcmp(t->target.u.user.name,
530 IP6T_STANDARD_TARGET) == 0) &&
531 t->verdict < -NF_MAX_VERDICT - 1) {
532 duprintf("mark_source_chains: bad "
533 "negative verdict (%i)\n",
534 t->verdict);
535 return 0;
538 /* Return: backtrack through the last
539 big jump. */
540 do {
541 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
542 #ifdef DEBUG_IP_FIREWALL_USER
543 if (e->comefrom
544 & (1 << NF_INET_NUMHOOKS)) {
545 duprintf("Back unset "
546 "on hook %u "
547 "rule %u\n",
548 hook, pos);
550 #endif
551 oldpos = pos;
552 pos = e->counters.pcnt;
/* Pop the saved back pointer; restore pcnt for real use. */
553 e->counters.pcnt = 0;
555 /* We're at the start. */
556 if (pos == oldpos)
557 goto next;
559 e = (struct ip6t_entry *)
560 (entry0 + pos);
561 } while (oldpos == pos + e->next_offset);
563 /* Move along one */
564 size = e->next_offset;
565 e = (struct ip6t_entry *)
566 (entry0 + pos + size);
567 e->counters.pcnt = pos;
568 pos += size;
569 } else {
570 int newpos = t->verdict;
572 if (strcmp(t->target.u.user.name,
573 IP6T_STANDARD_TARGET) == 0 &&
574 newpos >= 0) {
/* Jump target must lie fully inside the blob. */
575 if (newpos > newinfo->size -
576 sizeof(struct ip6t_entry)) {
577 duprintf("mark_source_chains: "
578 "bad verdict (%i)\n",
579 newpos);
580 return 0;
582 /* This a jump; chase it. */
583 duprintf("Jump rule %u -> %u\n",
584 pos, newpos);
585 } else {
586 /* ... this is a fallthru */
587 newpos = pos + e->next_offset;
589 e = (struct ip6t_entry *)
590 (entry0 + newpos);
591 e->counters.pcnt = pos;
592 pos = newpos;
595 next:
596 duprintf("Finished chain %u\n", hook);
598 return 1;
601 static int
602 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
604 struct xt_mtdtor_param par;
606 if (i && (*i)-- == 0)
607 return 1;
609 par.net = net;
610 par.match = m->u.kernel.match;
611 par.matchinfo = m->data;
612 par.family = NFPROTO_IPV6;
613 if (par.match->destroy != NULL)
614 par.match->destroy(&par);
615 module_put(par.match->me);
616 return 0;
619 static int
620 check_entry(const struct ip6t_entry *e, const char *name)
622 const struct ip6t_entry_target *t;
624 if (!ip6_checkentry(&e->ipv6)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
626 return -EINVAL;
629 if (e->target_offset + sizeof(struct ip6t_entry_target) >
630 e->next_offset)
631 return -EINVAL;
633 t = ip6t_get_target_c(e);
634 if (e->target_offset + t->u.target_size > e->next_offset)
635 return -EINVAL;
637 return 0;
640 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
641 unsigned int *i)
643 const struct ip6t_ip6 *ipv6 = par->entryinfo;
644 int ret;
646 par->match = m->u.kernel.match;
647 par->matchinfo = m->data;
649 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
650 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
651 if (ret < 0) {
652 duprintf("ip_tables: check failed for `%s'.\n",
653 par.match->name);
654 return ret;
656 ++*i;
657 return 0;
660 static int
661 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
662 unsigned int *i)
664 struct xt_match *match;
665 int ret;
667 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
668 m->u.user.revision),
669 "ip6t_%s", m->u.user.name);
670 if (IS_ERR(match) || !match) {
671 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
672 return match ? PTR_ERR(match) : -ENOENT;
674 m->u.kernel.match = match;
676 ret = check_match(m, par, i);
677 if (ret)
678 goto err;
680 return 0;
681 err:
682 module_put(m->u.kernel.match->me);
683 return ret;
686 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
688 struct ip6t_entry_target *t = ip6t_get_target(e);
689 struct xt_tgchk_param par = {
690 .net = net,
691 .table = name,
692 .entryinfo = e,
693 .target = t->u.kernel.target,
694 .targinfo = t->data,
695 .hook_mask = e->comefrom,
696 .family = NFPROTO_IPV6,
698 int ret;
700 t = ip6t_get_target(e);
701 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
702 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
703 if (ret < 0) {
704 duprintf("ip_tables: check failed for `%s'.\n",
705 t->u.kernel.target->name);
706 return ret;
708 return 0;
/*
 * Fully resolve and validate one entry: basic sanity, then every match
 * (loading modules as needed), then the target.  'j' counts successfully
 * checked matches so only those are unwound on failure.  On success *i
 * is incremented (total validated entries).  Returns 0 or -errno.
 */
711 static int
712 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
713 unsigned int size, unsigned int *i)
715 struct ip6t_entry_target *t;
716 struct xt_target *target;
717 int ret;
718 unsigned int j;
719 struct xt_mtchk_param mtpar;
721 ret = check_entry(e, name);
722 if (ret)
723 return ret;
725 j = 0;
726 mtpar.net = net;
727 mtpar.table = name;
728 mtpar.entryinfo = &e->ipv6;
729 mtpar.hook_mask = e->comefrom;
730 mtpar.family = NFPROTO_IPV6;
731 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
732 if (ret != 0)
733 goto cleanup_matches;
735 t = ip6t_get_target(e);
736 target = try_then_request_module(xt_find_target(AF_INET6,
737 t->u.user.name,
738 t->u.user.revision),
739 "ip6t_%s", t->u.user.name);
740 if (IS_ERR(target) || !target) {
741 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
742 ret = target ? PTR_ERR(target) : -ENOENT;
743 goto cleanup_matches;
745 t->u.kernel.target = target;
747 ret = check_target(e, net, name);
748 if (ret)
749 goto err;
751 (*i)++;
752 return 0;
753 err:
754 module_put(t->u.kernel.target->me);
755 cleanup_matches:
/* NULL count => clean up all matches resolved so far. */
756 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
757 return ret;
760 static bool check_underflow(const struct ip6t_entry *e)
762 const struct ip6t_entry_target *t;
763 unsigned int verdict;
765 if (!unconditional(&e->ipv6))
766 return false;
767 t = ip6t_get_target_c(e);
768 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
769 return false;
770 verdict = ((struct ip6t_standard_target *)t)->verdict;
771 verdict = -verdict - 1;
772 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * First-pass validation of one entry in the user-supplied blob: checks
 * alignment, bounds and minimum size, records matching hook entry and
 * underflow offsets in newinfo, zeroes counters/comefrom, and counts
 * entries via *i.  Returns 0 or -EINVAL.
 */
775 static int
776 check_entry_size_and_hooks(struct ip6t_entry *e,
777 struct xt_table_info *newinfo,
778 const unsigned char *base,
779 const unsigned char *limit,
780 const unsigned int *hook_entries,
781 const unsigned int *underflows,
782 unsigned int valid_hooks,
783 unsigned int *i)
785 unsigned int h;
787 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
788 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
789 duprintf("Bad offset %p\n", e);
790 return -EINVAL;
793 if (e->next_offset
794 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
795 duprintf("checking: element %p size %u\n",
796 e, e->next_offset);
797 return -EINVAL;
800 /* Check hooks & underflows */
801 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
802 if (!(valid_hooks & (1 << h)))
803 continue;
804 if ((unsigned char *)e - base == hook_entries[h])
805 newinfo->hook_entry[h] = hook_entries[h];
806 if ((unsigned char *)e - base == underflows[h]) {
807 if (!check_underflow(e)) {
808 pr_err("Underflows must be unconditional and "
809 "use the STANDARD target with "
810 "ACCEPT/DROP\n");
811 return -EINVAL;
813 newinfo->underflow[h] = underflows[h];
817 /* Clear counters and comefrom */
818 e->counters = ((struct xt_counters) { 0, 0 });
819 e->comefrom = 0;
821 (*i)++;
822 return 0;
/*
 * Tear down one entry: destroy all its matches, then its target, and
 * drop the module references.  When 'i' is non-NULL only the first *i
 * entries are cleaned (partial unwind); returning 1 stops iteration.
 */
825 static int
826 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
828 struct xt_tgdtor_param par;
829 struct ip6t_entry_target *t;
831 if (i && (*i)-- == 0)
832 return 1;
834 /* Cleanup all matches */
835 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
836 t = ip6t_get_target(e);
838 par.net = net;
839 par.target = t->u.kernel.target;
840 par.targinfo = t->data;
841 par.family = NFPROTO_IPV6;
842 if (par.target->destroy != NULL)
843 par.target->destroy(&par);
844 module_put(par.target->me);
845 return 0;
848 /* Checks and translates the user-supplied table segment (held in
849 newinfo) */
/*
 * Full validation pipeline: size/hook pass, loop detection via
 * mark_source_chains(), then per-entry match/target resolution; on any
 * later failure the already-validated prefix is unwound.  Finally the
 * verified blob is replicated to every other CPU's copy.
 */
850 static int
851 translate_table(struct net *net,
852 const char *name,
853 unsigned int valid_hooks,
854 struct xt_table_info *newinfo,
855 void *entry0,
856 unsigned int size,
857 unsigned int number,
858 const unsigned int *hook_entries,
859 const unsigned int *underflows)
861 struct ip6t_entry *iter;
862 unsigned int i;
863 int ret = 0;
865 newinfo->size = size;
866 newinfo->number = number;
868 /* Init all hooks to impossible value. */
869 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
870 newinfo->hook_entry[i] = 0xFFFFFFFF;
871 newinfo->underflow[i] = 0xFFFFFFFF;
874 duprintf("translate_table: size %u\n", newinfo->size);
875 i = 0;
876 /* Walk through entries, checking offsets. */
877 xt_entry_foreach(iter, entry0, newinfo->size) {
878 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
879 entry0 + size, hook_entries, underflows,
880 valid_hooks, &i);
881 if (ret != 0)
882 break;
884 if (ret != 0)
885 return ret;
887 if (i != number) {
888 duprintf("translate_table: %u not %u entries\n",
889 i, number);
890 return -EINVAL;
893 /* Check hooks all assigned */
894 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
895 /* Only hooks which are valid */
896 if (!(valid_hooks & (1 << i)))
897 continue;
898 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
899 duprintf("Invalid hook entry %u %u\n",
900 i, hook_entries[i]);
901 return -EINVAL;
903 if (newinfo->underflow[i] == 0xFFFFFFFF) {
904 duprintf("Invalid underflow %u %u\n",
905 i, underflows[i]);
906 return -EINVAL;
910 if (!mark_source_chains(newinfo, valid_hooks, entry0))
911 return -ELOOP;
913 /* Finally, each sanity check must pass */
914 i = 0;
915 xt_entry_foreach(iter, entry0, newinfo->size) {
916 ret = find_check_entry(iter, net, name, size, &i);
917 if (ret != 0)
918 break;
/* Unwind only the i entries that passed find_check_entry(). */
921 if (ret != 0) {
922 xt_entry_foreach(iter, entry0, newinfo->size)
923 if (cleanup_entry(iter, net, &i) != 0)
924 break;
925 return ret;
928 /* And one copy for every other CPU */
929 for_each_possible_cpu(i) {
930 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
931 memcpy(newinfo->entries[i], entry0, newinfo->size);
934 return ret;
937 /* Gets counters. */
938 static inline int
939 add_entry_to_counter(const struct ip6t_entry *e,
940 struct xt_counters total[],
941 unsigned int *i)
943 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
945 (*i)++;
946 return 0;
949 static inline int
950 set_entry_to_counter(const struct ip6t_entry *e,
951 struct ip6t_counters total[],
952 unsigned int *i)
954 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
956 (*i)++;
957 return 0;
/*
 * Snapshot the per-CPU rule counters into counters[]: SET from the
 * current CPU first, then ADD every other CPU's copy under its per-CPU
 * write lock.
 */
960 static void
961 get_counters(const struct xt_table_info *t,
962 struct xt_counters counters[])
964 struct ip6t_entry *iter;
965 unsigned int cpu;
966 unsigned int i;
967 unsigned int curcpu;
969 /* Instead of clearing (by a previous call to memset())
970 * the counters and using adds, we set the counters
971 * with data used by 'current' CPU
973 * Bottom half has to be disabled to prevent deadlock
974 * if new softirq were to run and call ipt_do_table
976 local_bh_disable();
977 curcpu = smp_processor_id();
979 i = 0;
980 xt_entry_foreach(iter, t->entries[curcpu], t->size)
981 if (set_entry_to_counter(iter, counters, &i) != 0)
982 break;
984 for_each_possible_cpu(cpu) {
985 if (cpu == curcpu)
986 continue;
987 i = 0;
988 xt_info_wrlock(cpu);
989 xt_entry_foreach(iter, t->entries[cpu], t->size)
990 if (add_entry_to_counter(iter, counters, &i) != 0)
991 break;
992 xt_info_wrunlock(cpu);
994 local_bh_enable();
/*
 * vmalloc a counter array sized for every rule in 'table' and fill it
 * with an atomic snapshot via get_counters().  Caller must vfree() it.
 * Returns the array or ERR_PTR(-ENOMEM).
 */
997 static struct xt_counters *alloc_counters(const struct xt_table *table)
999 unsigned int countersize;
1000 struct xt_counters *counters;
1001 const struct xt_table_info *private = table->private;
1003 /* We need atomic snapshot of counters: rest doesn't change
1004 (other than comefrom, which userspace doesn't care
1005 about). */
1006 countersize = sizeof(struct xt_counters) * private->number;
1007 counters = vmalloc_node(countersize, numa_node_id());
1009 if (counters == NULL)
1010 return ERR_PTR(-ENOMEM);
1012 get_counters(private, counters);
1014 return counters;
/*
 * Copy this CPU's table blob to userspace, then patch each copied entry
 * in place: fresh counter snapshot, and kernel-resolved match/target
 * names written back over the u.user.name fields.  Returns 0 or -EFAULT.
 */
1017 static int
1018 copy_entries_to_user(unsigned int total_size,
1019 const struct xt_table *table,
1020 void __user *userptr)
1022 unsigned int off, num;
1023 const struct ip6t_entry *e;
1024 struct xt_counters *counters;
1025 const struct xt_table_info *private = table->private;
1026 int ret = 0;
1027 const void *loc_cpu_entry;
1029 counters = alloc_counters(table);
1030 if (IS_ERR(counters))
1031 return PTR_ERR(counters);
1033 /* choose the copy that is on our node/cpu, ...
1034 * This choice is lazy (because current thread is
1035 * allowed to migrate to another cpu)
1037 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1038 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1039 ret = -EFAULT;
1040 goto free_counters;
1043 /* FIXME: use iterator macros --RR */
1044 /* ... then go back and fix counters and names */
1045 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1046 unsigned int i;
1047 const struct ip6t_entry_match *m;
1048 const struct ip6t_entry_target *t;
1050 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1051 if (copy_to_user(userptr + off
1052 + offsetof(struct ip6t_entry, counters),
1053 &counters[num],
1054 sizeof(counters[num])) != 0) {
1055 ret = -EFAULT;
1056 goto free_counters;
/* Walk the matches between the entry header and the target. */
1059 for (i = sizeof(struct ip6t_entry);
1060 i < e->target_offset;
1061 i += m->u.match_size) {
1062 m = (void *)e + i;
1064 if (copy_to_user(userptr + off + i
1065 + offsetof(struct ip6t_entry_match,
1066 u.user.name),
1067 m->u.kernel.match->name,
1068 strlen(m->u.kernel.match->name)+1)
1069 != 0) {
1070 ret = -EFAULT;
1071 goto free_counters;
1075 t = ip6t_get_target_c(e);
1076 if (copy_to_user(userptr + off + e->target_offset
1077 + offsetof(struct ip6t_entry_target,
1078 u.user.name),
1079 t->u.kernel.target->name,
1080 strlen(t->u.kernel.target->name)+1) != 0) {
1081 ret = -EFAULT;
1082 goto free_counters;
1086 free_counters:
1087 vfree(counters);
1088 return ret;
1091 #ifdef CONFIG_COMPAT
1092 static void compat_standard_from_user(void *dst, const void *src)
1094 int v = *(compat_int_t *)src;
1096 if (v > 0)
1097 v += xt_compat_calc_jump(AF_INET6, v);
1098 memcpy(dst, &v, sizeof(v));
1101 static int compat_standard_to_user(void __user *dst, const void *src)
1103 compat_int_t cv = *(int *)src;
1105 if (cv > 0)
1106 cv -= xt_compat_calc_jump(AF_INET6, cv);
1107 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1110 static inline int
1111 compat_calc_match(const struct ip6t_entry_match *m, int *size)
1113 *size += xt_compat_match_offset(m->u.kernel.match);
1114 return 0;
/*
 * Compute one entry's total native-vs-compat size delta, register it
 * with the xt compat offset table, shrink newinfo->size accordingly,
 * and shift any hook/underflow offsets that lie past this entry.
 */
1117 static int compat_calc_entry(const struct ip6t_entry *e,
1118 const struct xt_table_info *info,
1119 const void *base, struct xt_table_info *newinfo)
1121 const struct ip6t_entry_target *t;
1122 unsigned int entry_offset;
1123 int off, i, ret;
1125 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1126 entry_offset = (void *)e - base;
1127 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1128 t = ip6t_get_target_c(e);
1129 off += xt_compat_target_offset(t->u.kernel.target);
1130 newinfo->size -= off;
1131 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1132 if (ret)
1133 return ret;
1135 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1136 if (info->hook_entry[i] &&
1137 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1138 newinfo->hook_entry[i] -= off;
1139 if (info->underflow[i] &&
1140 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1141 newinfo->underflow[i] -= off;
1143 return 0;
/*
 * Build a compat-sized xt_table_info header from 'info' by walking this
 * CPU's entry blob and subtracting each entry's compat size delta.
 */
1146 static int compat_table_info(const struct xt_table_info *info,
1147 struct xt_table_info *newinfo)
1149 struct ip6t_entry *iter;
1150 void *loc_cpu_entry;
1151 int ret = 0;
1153 if (!newinfo || !info)
1154 return -EINVAL;
1156 /* we dont care about newinfo->entries[] */
1157 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1158 newinfo->initial_entries = 0;
1159 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1160 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1161 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1162 if (ret != 0)
1163 break;
1165 return ret;
1167 #endif
/*
 * IP6T_SO_GET_INFO handler: look the table up by (NUL-terminated) name,
 * auto-loading "ip6table_<name>" if needed, and copy its hook offsets,
 * entry count and size to userspace.  In compat mode the sizes/offsets
 * are first recomputed for the 32-bit layout.
 */
1169 static int get_info(struct net *net, void __user *user,
1170 const int *len, int compat)
1172 char name[IP6T_TABLE_MAXNAMELEN];
1173 struct xt_table *t;
1174 int ret;
1176 if (*len != sizeof(struct ip6t_getinfo)) {
1177 duprintf("length %u != %zu\n", *len,
1178 sizeof(struct ip6t_getinfo));
1179 return -EINVAL;
1182 if (copy_from_user(name, user, sizeof(name)) != 0)
1183 return -EFAULT;
/* Force NUL termination on the userspace-supplied name. */
1185 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1186 #ifdef CONFIG_COMPAT
1187 if (compat)
1188 xt_compat_lock(AF_INET6);
1189 #endif
1190 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1191 "ip6table_%s", name);
1192 if (t && !IS_ERR(t)) {
1193 struct ip6t_getinfo info;
1194 const struct xt_table_info *private = t->private;
1195 #ifdef CONFIG_COMPAT
1196 struct xt_table_info tmp;
1198 if (compat) {
1199 ret = compat_table_info(private, &tmp);
1200 xt_compat_flush_offsets(AF_INET6);
1201 private = &tmp;
1203 #endif
1204 info.valid_hooks = t->valid_hooks;
1205 memcpy(info.hook_entry, private->hook_entry,
1206 sizeof(info.hook_entry));
1207 memcpy(info.underflow, private->underflow,
1208 sizeof(info.underflow));
1209 info.num_entries = private->number;
1210 info.size = private->size;
1211 strcpy(info.name, name);
1213 if (copy_to_user(user, &info, *len) != 0)
1214 ret = -EFAULT;
1215 else
1216 ret = 0;
1218 xt_table_unlock(t);
1219 module_put(t->me);
1220 } else
1221 ret = t ? PTR_ERR(t) : -ENOENT;
1222 #ifdef CONFIG_COMPAT
1223 if (compat)
1224 xt_compat_unlock(AF_INET6);
1225 #endif
1226 return ret;
1229 static int
1230 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1231 const int *len)
1233 int ret;
1234 struct ip6t_get_entries get;
1235 struct xt_table *t;
1237 if (*len < sizeof(get)) {
1238 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1239 return -EINVAL;
1241 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1242 return -EFAULT;
1243 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1244 duprintf("get_entries: %u != %zu\n",
1245 *len, sizeof(get) + get.size);
1246 return -EINVAL;
1249 t = xt_find_table_lock(net, AF_INET6, get.name);
1250 if (t && !IS_ERR(t)) {
1251 struct xt_table_info *private = t->private;
1252 duprintf("t->private->number = %u\n", private->number);
1253 if (get.size == private->size)
1254 ret = copy_entries_to_user(private->size,
1255 t, uptr->entrytable);
1256 else {
1257 duprintf("get_entries: I've got %u not %u!\n",
1258 private->size, get.size);
1259 ret = -EAGAIN;
1261 module_put(t->me);
1262 xt_table_unlock(t);
1263 } else
1264 ret = t ? PTR_ERR(t) : -ENOENT;
1266 return ret;
/*
 * __do_replace - swap @newinfo into the table @name and hand the old
 * counters back to userspace.  Common backend for do_replace() and
 * compat_do_replace().
 * Callers must have bounds-checked @num_counters already (overflow in
 * the vmalloc size is checked in do_replace/compat_do_replace).
 * Returns 0 on success or a negative errno; on failure @newinfo is NOT
 * freed here -- the caller owns it.
 */
1269 static int
1270 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1271 struct xt_table_info *newinfo, unsigned int num_counters,
1272 void __user *counters_ptr)
1274 int ret;
1275 struct xt_table *t;
1276 struct xt_table_info *oldinfo;
1277 struct xt_counters *counters;
1278 const void *loc_cpu_old_entry;
1279 struct ip6t_entry *iter;
1281 ret = 0;
1282 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1283 numa_node_id());
1284 if (!counters) {
1285 ret = -ENOMEM;
1286 goto out;
1289 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1290 "ip6table_%s", name);
1291 if (!t || IS_ERR(t)) {
1292 ret = t ? PTR_ERR(t) : -ENOENT;
1293 goto free_newinfo_counters_untrans;
1296 /* You lied! */
1297 if (valid_hooks != t->valid_hooks) {
1298 duprintf("Valid hook crap: %08X vs %08X\n",
1299 valid_hooks, t->valid_hooks);
1300 ret = -EINVAL;
1301 goto put_module;
1304 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1305 if (!oldinfo)
1306 goto put_module;
1308 /* Update module usage count based on number of rules */
1309 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1310 oldinfo->number, oldinfo->initial_entries, newinfo->number);
/* drop the ref taken at lookup time unless extra rules keep it; the
 * second put covers the case where the old table pinned the module
 * and the new one no longer does */
1311 if ((oldinfo->number > oldinfo->initial_entries) ||
1312 (newinfo->number <= oldinfo->initial_entries))
1313 module_put(t->me);
1314 if ((oldinfo->number > oldinfo->initial_entries) &&
1315 (newinfo->number <= oldinfo->initial_entries))
1316 module_put(t->me);
1318 /* Get the old counters, and synchronize with replace */
1319 get_counters(oldinfo, counters);
1321 /* Decrease module usage counts and free resource */
1322 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1323 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1324 if (cleanup_entry(iter, net, NULL) != 0)
1325 break;
1327 xt_free_table_info(oldinfo);
/* a failed copy_to_user here still leaves the new table installed */
1328 if (copy_to_user(counters_ptr, counters,
1329 sizeof(struct xt_counters) * num_counters) != 0)
1330 ret = -EFAULT;
1331 vfree(counters);
1332 xt_table_unlock(t);
1333 return ret;
1335 put_module:
1336 module_put(t->me);
1337 xt_table_unlock(t);
1338 free_newinfo_counters_untrans:
1339 vfree(counters);
1340 out:
1341 return ret;
1344 static int
1345 do_replace(struct net *net, const void __user *user, unsigned int len)
1347 int ret;
1348 struct ip6t_replace tmp;
1349 struct xt_table_info *newinfo;
1350 void *loc_cpu_entry;
1351 struct ip6t_entry *iter;
1353 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1354 return -EFAULT;
1356 /* overflow check */
1357 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1358 return -ENOMEM;
1360 newinfo = xt_alloc_table_info(tmp.size);
1361 if (!newinfo)
1362 return -ENOMEM;
1364 /* choose the copy that is on our node/cpu */
1365 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1366 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1367 tmp.size) != 0) {
1368 ret = -EFAULT;
1369 goto free_newinfo;
1372 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1373 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1374 tmp.hook_entry, tmp.underflow);
1375 if (ret != 0)
1376 goto free_newinfo;
1378 duprintf("ip_tables: Translated table\n");
1380 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1381 tmp.num_counters, tmp.counters);
1382 if (ret)
1383 goto free_newinfo_untrans;
1384 return 0;
1386 free_newinfo_untrans:
1387 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1388 if (cleanup_entry(iter, net, NULL) != 0)
1389 break;
1390 free_newinfo:
1391 xt_free_table_info(newinfo);
1392 return ret;
1395 /* We're lazy, and add to the first CPU; overflow works its fey magic
1396 * and everything is OK. */
/*
 * add_counter_to_entry - iterator callback: fold the user-supplied
 * byte/packet counts addme[*i] into this rule's counters and advance
 * the index.  Always returns 0 so xt_entry_foreach() keeps going.
 */
1397 static int
1398 add_counter_to_entry(struct ip6t_entry *e,
1399 const struct xt_counters addme[],
1400 unsigned int *i)
1402 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1404 (*i)++;
1405 return 0;
1408 static int
1409 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1410 int compat)
1412 unsigned int i, curcpu;
1413 struct xt_counters_info tmp;
1414 struct xt_counters *paddc;
1415 unsigned int num_counters;
1416 char *name;
1417 int size;
1418 void *ptmp;
1419 struct xt_table *t;
1420 const struct xt_table_info *private;
1421 int ret = 0;
1422 const void *loc_cpu_entry;
1423 struct ip6t_entry *iter;
1424 #ifdef CONFIG_COMPAT
1425 struct compat_xt_counters_info compat_tmp;
1427 if (compat) {
1428 ptmp = &compat_tmp;
1429 size = sizeof(struct compat_xt_counters_info);
1430 } else
1431 #endif
1433 ptmp = &tmp;
1434 size = sizeof(struct xt_counters_info);
1437 if (copy_from_user(ptmp, user, size) != 0)
1438 return -EFAULT;
1440 #ifdef CONFIG_COMPAT
1441 if (compat) {
1442 num_counters = compat_tmp.num_counters;
1443 name = compat_tmp.name;
1444 } else
1445 #endif
1447 num_counters = tmp.num_counters;
1448 name = tmp.name;
1451 if (len != size + num_counters * sizeof(struct xt_counters))
1452 return -EINVAL;
1454 paddc = vmalloc_node(len - size, numa_node_id());
1455 if (!paddc)
1456 return -ENOMEM;
1458 if (copy_from_user(paddc, user + size, len - size) != 0) {
1459 ret = -EFAULT;
1460 goto free;
1463 t = xt_find_table_lock(net, AF_INET6, name);
1464 if (!t || IS_ERR(t)) {
1465 ret = t ? PTR_ERR(t) : -ENOENT;
1466 goto free;
1470 local_bh_disable();
1471 private = t->private;
1472 if (private->number != num_counters) {
1473 ret = -EINVAL;
1474 goto unlock_up_free;
1477 i = 0;
1478 /* Choose the copy that is on our node */
1479 curcpu = smp_processor_id();
1480 xt_info_wrlock(curcpu);
1481 loc_cpu_entry = private->entries[curcpu];
1482 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1483 if (add_counter_to_entry(iter, paddc, &i) != 0)
1484 break;
1485 xt_info_wrunlock(curcpu);
1487 unlock_up_free:
1488 local_bh_enable();
1489 xt_table_unlock(t);
1490 module_put(t->me);
1491 free:
1492 vfree(paddc);
1494 return ret;
1497 #ifdef CONFIG_COMPAT
/*
 * 32-bit (compat) layout of struct ip6t_replace, as received from a
 * 32-bit userspace on a 64-bit kernel.  Field meanings mirror the
 * native structure; hook_entry/underflow are byte offsets into the
 * trailing compat entry blob.
 */
1498 struct compat_ip6t_replace {
1499 char name[IP6T_TABLE_MAXNAMELEN];
1500 u32 valid_hooks;
1501 u32 num_entries;
1502 u32 size;
1503 u32 hook_entry[NF_INET_NUMHOOKS];
1504 u32 underflow[NF_INET_NUMHOOKS];
1505 u32 num_counters;
1506 compat_uptr_t counters; /* struct ip6t_counters * */
1507 struct compat_ip6t_entry entries[0];
/*
 * compat_copy_entry_to_user - serialize one native rule into the compat
 * layout in the userspace buffer at *dstptr.
 * *dstptr and *size track the output cursor and remaining shrinkage;
 * counters[*i] is written into the entry's counter slot.  The target
 * and next-entry offsets are re-based to account for the bytes saved by
 * the compat match/target representations.
 * Returns 0 on success or a negative errno (-EFAULT on copy failures).
 */
1510 static int
1511 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1512 unsigned int *size, struct xt_counters *counters,
1513 unsigned int *i)
1515 struct ip6t_entry_target *t;
1516 struct compat_ip6t_entry __user *ce;
1517 u_int16_t target_offset, next_offset;
1518 compat_uint_t origsize;
1519 int ret;
1521 ret = -EFAULT;
1522 origsize = *size;
1523 ce = (struct compat_ip6t_entry __user *)*dstptr;
1524 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1525 goto out;
1527 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1528 goto out;
1530 *dstptr += sizeof(struct compat_ip6t_entry);
1531 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* matches shrink first; origsize - *size is the total shrinkage so far */
1533 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1534 target_offset = e->target_offset - (origsize - *size);
1535 if (ret)
1536 goto out;
1537 t = ip6t_get_target(e);
1538 ret = xt_compat_target_to_user(t, dstptr, size);
1539 if (ret)
1540 goto out;
1541 ret = -EFAULT;
1542 next_offset = e->next_offset - (origsize - *size);
1543 if (put_user(target_offset, &ce->target_offset))
1544 goto out;
1545 if (put_user(next_offset, &ce->next_offset))
1546 goto out;
1548 (*i)++;
1549 return 0;
1550 out:
1551 return ret;
/*
 * compat_find_calc_match - resolve one compat match by name/revision
 * (auto-loading "ip6t_<name>" if necessary), record the kernel match
 * in m->u.kernel.match, accumulate the native-vs-compat size delta
 * into *size, and count the match in *i for later release on error.
 * Returns 0 on success or a negative errno.
 */
1554 static int
1555 compat_find_calc_match(struct ip6t_entry_match *m,
1556 const char *name,
1557 const struct ip6t_ip6 *ipv6,
1558 unsigned int hookmask,
1559 int *size, unsigned int *i)
1561 struct xt_match *match;
1563 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1564 m->u.user.revision),
1565 "ip6t_%s", m->u.user.name);
1566 if (IS_ERR(match) || !match) {
1567 duprintf("compat_check_calc_match: `%s' not found\n",
1568 m->u.user.name);
1569 return match ? PTR_ERR(match) : -ENOENT;
1571 m->u.kernel.match = match;
1572 *size += xt_compat_match_offset(match);
1574 (*i)++;
1575 return 0;
/*
 * compat_release_match - iterator callback that drops the module ref of
 * one resolved match.  When @i is non-NULL it bounds how many matches
 * to release: returning 1 once *i reaches zero stops the iteration.
 */
1578 static int
1579 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1581 if (i && (*i)-- == 0)
1582 return 1;
1584 module_put(m->u.kernel.match->me);
1585 return 0;
/*
 * compat_release_entry - release all module references held by one
 * compat entry (every match, then the target).  @i, when non-NULL,
 * bounds how many entries to process; returning 1 stops the caller's
 * iteration once the budget is used up.
 */
1588 static int
1589 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1591 struct ip6t_entry_target *t;
1593 if (i && (*i)-- == 0)
1594 return 1;
1596 /* Cleanup all matches */
1597 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1598 t = compat_ip6t_get_target(e);
1599 module_put(t->u.kernel.target->me);
1600 return 0;
/*
 * check_compat_entry_size_and_hooks - first-pass validation of one
 * compat entry: bounds/alignment checks, resolve all matches and the
 * target (taking module refs), register the compat->native size delta
 * with xt_compat_add_offset(), and note which hook entry/underflow
 * this entry's offset corresponds to.  *size grows by the delta; *i
 * counts validated entries.  On error all refs taken here are dropped.
 * Returns 0 on success or a negative errno.
 */
1603 static int
1604 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1605 struct xt_table_info *newinfo,
1606 unsigned int *size,
1607 const unsigned char *base,
1608 const unsigned char *limit,
1609 const unsigned int *hook_entries,
1610 const unsigned int *underflows,
1611 unsigned int *i,
1612 const char *name)
1614 struct ip6t_entry_target *t;
1615 struct xt_target *target;
1616 unsigned int entry_offset;
1617 unsigned int j;
1618 int ret, off, h;
1620 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1621 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1622 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1623 duprintf("Bad offset %p, limit = %p\n", e, limit);
1624 return -EINVAL;
1627 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1628 sizeof(struct compat_xt_entry_target)) {
1629 duprintf("checking: element %p size %u\n",
1630 e, e->next_offset);
1631 return -EINVAL;
1634 /* For purposes of check_entry casting the compat entry is fine */
1635 ret = check_entry((struct ip6t_entry *)e, name);
1636 if (ret)
1637 return ret;
/* off accumulates how much larger the native form will be */
1639 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1640 entry_offset = (void *)e - (void *)base;
1641 j = 0;
1642 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1643 &e->ipv6, e->comefrom, &off, &j);
1644 if (ret != 0)
1645 goto release_matches;
1647 t = compat_ip6t_get_target(e);
1648 target = try_then_request_module(xt_find_target(AF_INET6,
1649 t->u.user.name,
1650 t->u.user.revision),
1651 "ip6t_%s", t->u.user.name);
1652 if (IS_ERR(target) || !target) {
1653 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1654 t->u.user.name);
1655 ret = target ? PTR_ERR(target) : -ENOENT;
1656 goto release_matches;
1658 t->u.kernel.target = target;
1660 off += xt_compat_target_offset(target);
1661 *size += off;
/* remember the delta so later offset conversions can look it up */
1662 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1663 if (ret)
1664 goto out;
1666 /* Check hooks & underflows */
1667 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1668 if ((unsigned char *)e - base == hook_entries[h])
1669 newinfo->hook_entry[h] = hook_entries[h];
1670 if ((unsigned char *)e - base == underflows[h])
1671 newinfo->underflow[h] = underflows[h];
1674 /* Clear counters and comefrom */
1675 memset(&e->counters, 0, sizeof(e->counters));
1676 e->comefrom = 0;
1678 (*i)++;
1679 return 0;
1681 out:
1682 module_put(t->u.kernel.target->me);
1683 release_matches:
1684 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1685 return ret;
/*
 * compat_copy_entry_from_user - second-pass conversion of one compat
 * entry into native layout at *dstptr (kernel memory).  Matches and the
 * target are expanded via the xt_compat_*_from_user helpers; the
 * target/next offsets and any hook/underflow offsets past this entry
 * are grown by the accumulated size difference (origsize - *size,
 * which is negative growth since *size increases here).
 * Returns 0 on success or a negative errno.
 */
1688 static int
1689 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1690 unsigned int *size, const char *name,
1691 struct xt_table_info *newinfo, unsigned char *base)
1693 struct ip6t_entry_target *t;
1694 struct xt_target *target;
1695 struct ip6t_entry *de;
1696 unsigned int origsize;
1697 int ret, h;
1699 ret = 0;
1700 origsize = *size;
1701 de = (struct ip6t_entry *)*dstptr;
1702 memcpy(de, e, sizeof(struct ip6t_entry));
1703 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1705 *dstptr += sizeof(struct ip6t_entry);
1706 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1708 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1709 dstptr, size);
1710 if (ret)
1711 return ret;
1712 de->target_offset = e->target_offset - (origsize - *size);
1713 t = compat_ip6t_get_target(e);
1714 target = t->u.kernel.target;
1715 xt_compat_target_from_user(t, dstptr, size);
1717 de->next_offset = e->next_offset - (origsize - *size);
/* shift hook offsets that point at or beyond this (grown) entry */
1718 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1719 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1720 newinfo->hook_entry[h] -= origsize - *size;
1721 if ((unsigned char *)de - base < newinfo->underflow[h])
1722 newinfo->underflow[h] -= origsize - *size;
1724 return ret;
/*
 * compat_check_entry - run ->checkentry validation for every match and
 * the target of one already-translated (native layout) entry.  On a
 * match failure, the j matches whose check succeeded are unwound with
 * cleanup_match().  *i counts fully-checked entries for the caller's
 * own error unwinding.  Returns 0 on success or a negative errno.
 */
1727 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1728 const char *name, unsigned int *i)
1730 unsigned int j;
1731 int ret;
1732 struct xt_mtchk_param mtpar;
1734 j = 0;
1735 mtpar.net = net;
1736 mtpar.table = name;
1737 mtpar.entryinfo = &e->ipv6;
1738 mtpar.hook_mask = e->comefrom;
1739 mtpar.family = NFPROTO_IPV6;
1740 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1741 if (ret)
1742 goto cleanup_matches;
1744 ret = check_target(e, net, name);
1745 if (ret)
1746 goto cleanup_matches;
1748 (*i)++;
1749 return 0;
1751 cleanup_matches:
1752 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
1753 return ret;
/*
 * translate_compat_table - convert a whole compat rule blob to native
 * layout.  Two passes under the compat lock: (1) validate every compat
 * entry, resolve its extensions and compute the grown size; (2) copy
 * each entry into a freshly allocated native table.  Afterwards the
 * chains are loop-checked and each entry's ->checkentry is run.  On
 * success *pinfo/*pentry0 are replaced with the native table and the
 * old info freed; on failure every module reference taken is unwound.
 */
1756 static int
1757 translate_compat_table(struct net *net,
1758 const char *name,
1759 unsigned int valid_hooks,
1760 struct xt_table_info **pinfo,
1761 void **pentry0,
1762 unsigned int total_size,
1763 unsigned int number,
1764 unsigned int *hook_entries,
1765 unsigned int *underflows)
1767 unsigned int i, j;
1768 struct xt_table_info *newinfo, *info;
1769 void *pos, *entry0, *entry1;
1770 struct compat_ip6t_entry *iter0;
1771 struct ip6t_entry *iter1;
1772 unsigned int size;
1773 int ret = 0;
1775 info = *pinfo;
1776 entry0 = *pentry0;
1777 size = total_size;
1778 info->number = number;
1780 /* Init all hooks to impossible value. */
1781 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1782 info->hook_entry[i] = 0xFFFFFFFF;
1783 info->underflow[i] = 0xFFFFFFFF;
1786 duprintf("translate_compat_table: size %u\n", info->size);
1787 j = 0;
1788 xt_compat_lock(AF_INET6);
1789 /* Walk through entries, checking offsets. */
1790 xt_entry_foreach(iter0, entry0, total_size) {
1791 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1792 entry0, entry0 + total_size, hook_entries, underflows,
1793 &j, name);
1794 if (ret != 0)
1795 break;
1797 if (ret != 0)
1798 goto out_unlock;
1800 ret = -EINVAL;
1801 if (j != number) {
1802 duprintf("translate_compat_table: %u not %u entries\n",
1803 j, number);
1804 goto out_unlock;
1807 /* Check hooks all assigned */
1808 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1809 /* Only hooks which are valid */
1810 if (!(valid_hooks & (1 << i)))
1811 continue;
1812 if (info->hook_entry[i] == 0xFFFFFFFF) {
1813 duprintf("Invalid hook entry %u %u\n",
1814 i, hook_entries[i]);
1815 goto out_unlock;
1817 if (info->underflow[i] == 0xFFFFFFFF) {
1818 duprintf("Invalid underflow %u %u\n",
1819 i, underflows[i]);
1820 goto out_unlock;
1824 ret = -ENOMEM;
1825 newinfo = xt_alloc_table_info(size);
1826 if (!newinfo)
1827 goto out_unlock;
1829 newinfo->number = number;
1830 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1831 newinfo->hook_entry[i] = info->hook_entry[i];
1832 newinfo->underflow[i] = info->underflow[i];
1834 entry1 = newinfo->entries[raw_smp_processor_id()];
1835 pos = entry1;
1836 size = total_size;
/* pass 2: expand each compat entry into the native blob */
1837 xt_entry_foreach(iter0, entry0, total_size) {
1838 ret = compat_copy_entry_from_user(iter0, &pos,
1839 &size, name, newinfo, entry1);
1840 if (ret != 0)
1841 break;
1843 xt_compat_flush_offsets(AF_INET6);
1844 xt_compat_unlock(AF_INET6);
1845 if (ret)
1846 goto free_newinfo;
1848 ret = -ELOOP;
1849 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1850 goto free_newinfo;
1852 i = 0;
1853 xt_entry_foreach(iter1, entry1, newinfo->size) {
1854 ret = compat_check_entry(iter1, net, name, &i);
1855 if (ret != 0)
1856 break;
1858 if (ret) {
/*
1860 * The first i matches need cleanup_entry (calls ->destroy)
1861 * because they had called ->check already. The other j-i
1862 * entries need only release.
*/
1864 int skip = i;
1865 j -= i;
/* NOTE(review): entry0 holds compat entries totalling total_size
 * bytes; bounding this walk by newinfo->size (the larger native
 * size) looks suspect -- compare the walk under "out:" below,
 * which uses total_size.  Verify against upstream history. */
1866 xt_entry_foreach(iter0, entry0, newinfo->size) {
1867 if (skip-- > 0)
1868 continue;
1869 if (compat_release_entry(iter0, &j) != 0)
1870 break;
1872 xt_entry_foreach(iter1, entry1, newinfo->size)
1873 if (cleanup_entry(iter1, net, &i) != 0)
1874 break;
1875 xt_free_table_info(newinfo);
1876 return ret;
1879 /* And one copy for every other CPU */
1880 for_each_possible_cpu(i)
1881 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1882 memcpy(newinfo->entries[i], entry1, newinfo->size);
1884 *pinfo = newinfo;
1885 *pentry0 = entry1;
1886 xt_free_table_info(info);
1887 return 0;
1889 free_newinfo:
1890 xt_free_table_info(newinfo);
1891 out:
1892 xt_entry_foreach(iter0, entry0, total_size)
1893 if (compat_release_entry(iter0, &j) != 0)
1894 break;
1895 return ret;
1896 out_unlock:
1897 xt_compat_flush_offsets(AF_INET6);
1898 xt_compat_unlock(AF_INET6);
1899 goto out;
1902 static int
1903 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1905 int ret;
1906 struct compat_ip6t_replace tmp;
1907 struct xt_table_info *newinfo;
1908 void *loc_cpu_entry;
1909 struct ip6t_entry *iter;
1911 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1912 return -EFAULT;
1914 /* overflow check */
1915 if (tmp.size >= INT_MAX / num_possible_cpus())
1916 return -ENOMEM;
1917 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1918 return -ENOMEM;
1920 newinfo = xt_alloc_table_info(tmp.size);
1921 if (!newinfo)
1922 return -ENOMEM;
1924 /* choose the copy that is on our node/cpu */
1925 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1926 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1927 tmp.size) != 0) {
1928 ret = -EFAULT;
1929 goto free_newinfo;
1932 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1933 &newinfo, &loc_cpu_entry, tmp.size,
1934 tmp.num_entries, tmp.hook_entry,
1935 tmp.underflow);
1936 if (ret != 0)
1937 goto free_newinfo;
1939 duprintf("compat_do_replace: Translated table\n");
1941 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1942 tmp.num_counters, compat_ptr(tmp.counters));
1943 if (ret)
1944 goto free_newinfo_untrans;
1945 return 0;
1947 free_newinfo_untrans:
1948 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1949 if (cleanup_entry(iter, net, NULL) != 0)
1950 break;
1951 free_newinfo:
1952 xt_free_table_info(newinfo);
1953 return ret;
/*
 * compat_do_ip6t_set_ctl - compat setsockopt dispatcher.  Requires
 * CAP_NET_ADMIN; routes SET_REPLACE to the compat replace path and
 * SET_ADD_COUNTERS to do_add_counters() in compat mode.
 */
1956 static int
1957 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1958 unsigned int len)
1960 int ret;
1962 if (!capable(CAP_NET_ADMIN))
1963 return -EPERM;
1965 switch (cmd) {
1966 case IP6T_SO_SET_REPLACE:
1967 ret = compat_do_replace(sock_net(sk), user, len);
1968 break;
1970 case IP6T_SO_SET_ADD_COUNTERS:
1971 ret = do_add_counters(sock_net(sk), user, len, 1);
1972 break;
1974 default:
1975 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1976 ret = -EINVAL;
1979 return ret;
/* 32-bit (compat) layout of struct ip6t_get_entries: name + announced
 * blob size, followed by the compat entry array to fill in. */
1982 struct compat_ip6t_get_entries {
1983 char name[IP6T_TABLE_MAXNAMELEN];
1984 compat_uint_t size;
1985 struct compat_ip6t_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump an entire table to userspace in
 * compat layout, pairing each rule with a snapshot of its counters
 * taken by alloc_counters().  Returns 0 or a negative errno.
 */
1988 static int
1989 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1990 void __user *userptr)
1992 struct xt_counters *counters;
1993 const struct xt_table_info *private = table->private;
1994 void __user *pos;
1995 unsigned int size;
1996 int ret = 0;
1997 const void *loc_cpu_entry;
1998 unsigned int i = 0;
1999 struct ip6t_entry *iter;
2001 counters = alloc_counters(table);
2002 if (IS_ERR(counters))
2003 return PTR_ERR(counters);
2005 /* choose the copy that is on our node/cpu, ...
2006 * This choice is lazy (because current thread is
2007 * allowed to migrate to another cpu)
 */
2009 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2010 pos = userptr;
2011 size = total_size;
2012 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
2013 ret = compat_copy_entry_to_user(iter, &pos,
2014 &size, counters, &i);
2015 if (ret != 0)
2016 break;
2019 vfree(counters);
2020 return ret;
2023 static int
2024 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
2025 int *len)
2027 int ret;
2028 struct compat_ip6t_get_entries get;
2029 struct xt_table *t;
2031 if (*len < sizeof(get)) {
2032 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
2033 return -EINVAL;
2036 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2037 return -EFAULT;
2039 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2040 duprintf("compat_get_entries: %u != %zu\n",
2041 *len, sizeof(get) + get.size);
2042 return -EINVAL;
2045 xt_compat_lock(AF_INET6);
2046 t = xt_find_table_lock(net, AF_INET6, get.name);
2047 if (t && !IS_ERR(t)) {
2048 const struct xt_table_info *private = t->private;
2049 struct xt_table_info info;
2050 duprintf("t->private->number = %u\n", private->number);
2051 ret = compat_table_info(private, &info);
2052 if (!ret && get.size == info.size) {
2053 ret = compat_copy_entries_to_user(private->size,
2054 t, uptr->entrytable);
2055 } else if (!ret) {
2056 duprintf("compat_get_entries: I've got %u not %u!\n",
2057 private->size, get.size);
2058 ret = -EAGAIN;
2060 xt_compat_flush_offsets(AF_INET6);
2061 module_put(t->me);
2062 xt_table_unlock(t);
2063 } else
2064 ret = t ? PTR_ERR(t) : -ENOENT;
2066 xt_compat_unlock(AF_INET6);
2067 return ret;
2070 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ip6t_get_ctl - compat getsockopt dispatcher.  Requires
 * CAP_NET_ADMIN.  GET_INFO and GET_ENTRIES need compat translation;
 * everything else is layout-identical and falls through to the native
 * do_ip6t_get_ctl().
 */
2072 static int
2073 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2075 int ret;
2077 if (!capable(CAP_NET_ADMIN))
2078 return -EPERM;
2080 switch (cmd) {
2081 case IP6T_SO_GET_INFO:
2082 ret = get_info(sock_net(sk), user, len, 1);
2083 break;
2084 case IP6T_SO_GET_ENTRIES:
2085 ret = compat_get_entries(sock_net(sk), user, len);
2086 break;
2087 default:
2088 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2090 return ret;
2092 #endif
/*
 * do_ip6t_set_ctl - native setsockopt dispatcher.  Requires
 * CAP_NET_ADMIN; routes SET_REPLACE to do_replace() and
 * SET_ADD_COUNTERS to do_add_counters() in native (non-compat) mode.
 */
2094 static int
2095 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2097 int ret;
2099 if (!capable(CAP_NET_ADMIN))
2100 return -EPERM;
2102 switch (cmd) {
2103 case IP6T_SO_SET_REPLACE:
2104 ret = do_replace(sock_net(sk), user, len);
2105 break;
2107 case IP6T_SO_SET_ADD_COUNTERS:
2108 ret = do_add_counters(sock_net(sk), user, len, 0);
2109 break;
2111 default:
2112 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2113 ret = -EINVAL;
2116 return ret;
/*
 * do_ip6t_get_ctl - native getsockopt dispatcher.  Requires
 * CAP_NET_ADMIN.  Handles table info/entry dumps and match/target
 * revision queries (the latter auto-loading "ip6t_<name>" modules).
 */
2119 static int
2120 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2122 int ret;
2124 if (!capable(CAP_NET_ADMIN))
2125 return -EPERM;
2127 switch (cmd) {
2128 case IP6T_SO_GET_INFO:
2129 ret = get_info(sock_net(sk), user, len, 0);
2130 break;
2132 case IP6T_SO_GET_ENTRIES:
2133 ret = get_entries(sock_net(sk), user, len);
2134 break;
2136 case IP6T_SO_GET_REVISION_MATCH:
2137 case IP6T_SO_GET_REVISION_TARGET: {
2138 struct ip6t_get_revision rev;
2139 int target;
2141 if (*len != sizeof(rev)) {
2142 ret = -EINVAL;
2143 break;
2145 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2146 ret = -EFAULT;
2147 break;
/* target=1: look up a target revision, else a match revision */
2150 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2151 target = 1;
2152 else
2153 target = 0;
/* result is delivered through &ret by xt_find_revision() */
2155 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2156 rev.revision,
2157 target, &ret),
2158 "ip6t_%s", rev.name);
2159 break;
2162 default:
2163 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2164 ret = -EINVAL;
2167 return ret;
/*
 * ip6t_register_table - register @table for @net with the initial
 * ruleset @repl.  Allocates a per-CPU table info, copies the template
 * rules into the local CPU's slot, translates/validates them, and
 * registers the result with x_tables.
 * Returns the live xt_table on success or an ERR_PTR() on failure.
 */
2170 struct xt_table *ip6t_register_table(struct net *net,
2171 const struct xt_table *table,
2172 const struct ip6t_replace *repl)
2174 int ret;
2175 struct xt_table_info *newinfo;
2176 struct xt_table_info bootstrap
2177 = { 0, 0, 0, { 0 }, { 0 }, { } };
2178 void *loc_cpu_entry;
2179 struct xt_table *new_table;
2181 newinfo = xt_alloc_table_info(repl->size);
2182 if (!newinfo) {
2183 ret = -ENOMEM;
2184 goto out;
2187 /* choose the copy on our node/cpu, but dont care about preemption */
2188 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2189 memcpy(loc_cpu_entry, repl->entries, repl->size);
2191 ret = translate_table(net, table->name, table->valid_hooks,
2192 newinfo, loc_cpu_entry, repl->size,
2193 repl->num_entries,
2194 repl->hook_entry,
2195 repl->underflow);
2196 if (ret != 0)
2197 goto out_free;
2199 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2200 if (IS_ERR(new_table)) {
2201 ret = PTR_ERR(new_table);
2202 goto out_free;
2204 return new_table;
2206 out_free:
2207 xt_free_table_info(newinfo);
2208 out:
2209 return ERR_PTR(ret);
/*
 * ip6t_unregister_table - tear down @table: unregister it from
 * x_tables, run cleanup_entry() on every rule (dropping extension
 * module refs), rebalance the owner module's refcount, and free the
 * table info.
 */
2212 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2214 struct xt_table_info *private;
2215 void *loc_cpu_entry;
2216 struct module *table_owner = table->me;
2217 struct ip6t_entry *iter;
2219 private = xt_unregister_table(table);
2221 /* Decrease module usage counts and free resources */
2222 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2223 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2224 if (cleanup_entry(iter, net, NULL) != 0)
2225 break;
/* extra rules beyond the built-ins held one more ref on the owner */
2226 if (private->number > private->initial_entries)
2227 module_put(table_owner);
2228 xt_free_table_info(private);
/* Returns true when (type, code) is selected by the configured ICMPv6
 * type and code range, XOR-ed with the invert flag. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool inside;

	inside = (type == test_type) &&
		 (code >= min_code) &&
		 (code <= max_code);

	/* on bools, != is exactly logical XOR */
	return inside != invert;
}
/*
 * icmp6_match - xt_match callback for the built-in "icmp6" match.
 * Rejects fragments outright, pulls the ICMPv6 header, and compares
 * type/code against the rule's configured range (honouring the invert
 * flag).  A truncated header hotdrops the packet.
 */
2241 static bool
2242 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2244 const struct icmp6hdr *ic;
2245 struct icmp6hdr _icmph;
2246 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2248 /* Must not be a fragment. */
2249 if (par->fragoff != 0)
2250 return false;
2252 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2253 if (ic == NULL) {
2254 /* We've been asked to examine this packet, and we
2255 * can't. Hence, no choice but to drop.
 */
2257 duprintf("Dropping evil ICMP tinygram.\n");
2258 *par->hotdrop = true;
2259 return false;
2262 return icmp6_type_code_match(icmpinfo->type,
2263 icmpinfo->code[0],
2264 icmpinfo->code[1],
2265 ic->icmp6_type, ic->icmp6_code,
2266 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2269 /* Called when user tries to insert an entry of this type. */
/* Valid iff no invflags other than IP6T_ICMP_INV are set. */
2270 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2272 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2274 /* Must specify no unknown invflags */
2275 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2278 /* The built-in targets: standard (NULL) and error. */
/* Standard target: verdict stored as an int; compat hooks convert the
 * 32-bit representation. */
2279 static struct xt_target ip6t_standard_target __read_mostly = {
2280 .name = IP6T_STANDARD_TARGET,
2281 .targetsize = sizeof(int),
2282 .family = NFPROTO_IPV6,
2283 #ifdef CONFIG_COMPAT
2284 .compatsize = sizeof(compat_int_t),
2285 .compat_from_user = compat_standard_from_user,
2286 .compat_to_user = compat_standard_to_user,
2287 #endif
/* Error target: carries an error name; hitting it logs and drops. */
2290 static struct xt_target ip6t_error_target __read_mostly = {
2291 .name = IP6T_ERROR_TARGET,
2292 .target = ip6t_error,
2293 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2294 .family = NFPROTO_IPV6,
/* get/setsockopt registration covering the IP6T_SO_* range, with
 * compat entry points for 32-bit userspace. */
2297 static struct nf_sockopt_ops ip6t_sockopts = {
2298 .pf = PF_INET6,
2299 .set_optmin = IP6T_BASE_CTL,
2300 .set_optmax = IP6T_SO_SET_MAX+1,
2301 .set = do_ip6t_set_ctl,
2302 #ifdef CONFIG_COMPAT
2303 .compat_set = compat_do_ip6t_set_ctl,
2304 #endif
2305 .get_optmin = IP6T_BASE_CTL,
2306 .get_optmax = IP6T_SO_GET_MAX+1,
2307 .get = do_ip6t_get_ctl,
2308 #ifdef CONFIG_COMPAT
2309 .compat_get = compat_do_ip6t_get_ctl,
2310 #endif
2311 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration. */
2314 static struct xt_match icmp6_matchstruct __read_mostly = {
2315 .name = "icmp6",
2316 .match = icmp6_match,
2317 .matchsize = sizeof(struct ip6t_icmp),
2318 .checkentry = icmp6_checkentry,
2319 .proto = IPPROTO_ICMPV6,
2320 .family = NFPROTO_IPV6,
/* Per-netns init/exit: set up and tear down the IPv6 x_tables state. */
2323 static int __net_init ip6_tables_net_init(struct net *net)
2325 return xt_proto_init(net, NFPROTO_IPV6);
2328 static void __net_exit ip6_tables_net_exit(struct net *net)
2330 xt_proto_fini(net, NFPROTO_IPV6);
2333 static struct pernet_operations ip6_tables_net_ops = {
2334 .init = ip6_tables_net_init,
2335 .exit = ip6_tables_net_exit,
/*
 * ip6_tables_init - module init: register the pernet subsystem, the
 * built-in standard/error targets and icmp6 match, then the sockopt
 * interface.  Each failure unwinds everything registered before it.
 */
2338 static int __init ip6_tables_init(void)
2340 int ret;
2342 ret = register_pernet_subsys(&ip6_tables_net_ops);
2343 if (ret < 0)
2344 goto err1;
2346 /* No one else will be downing sem now, so we won't sleep */
2347 ret = xt_register_target(&ip6t_standard_target);
2348 if (ret < 0)
2349 goto err2;
2350 ret = xt_register_target(&ip6t_error_target);
2351 if (ret < 0)
2352 goto err3;
2353 ret = xt_register_match(&icmp6_matchstruct);
2354 if (ret < 0)
2355 goto err4;
2357 /* Register setsockopt */
2358 ret = nf_register_sockopt(&ip6t_sockopts);
2359 if (ret < 0)
2360 goto err5;
2362 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2363 return 0;
2365 err5:
2366 xt_unregister_match(&icmp6_matchstruct);
2367 err4:
2368 xt_unregister_target(&ip6t_error_target);
2369 err3:
2370 xt_unregister_target(&ip6t_standard_target);
2371 err2:
2372 unregister_pernet_subsys(&ip6_tables_net_ops);
2373 err1:
2374 return ret;
/* Module exit: unregister everything in reverse of ip6_tables_init(). */
2377 static void __exit ip6_tables_fini(void)
2379 nf_unregister_sockopt(&ip6t_sockopts);
2381 xt_unregister_match(&icmp6_matchstruct);
2382 xt_unregister_target(&ip6t_error_target);
2383 xt_unregister_target(&ip6t_standard_target);
2385 unregister_pernet_subsys(&ip6_tables_net_ops);
/*
2389 * find the offset to specified header or the protocol number of last header
2390 * if target < 0. "last header" is transport protocol header, ESP, or
2391 * "No next header".
2393 * If target header is found, its offset is set in *offset and return protocol
2394 * number. Otherwise, return -1.
2396 * If the first fragment doesn't contain the final protocol header or
2397 * NEXTHDR_NONE it is considered invalid.
2399 * Note that non-1st fragment is special case that "the protocol number
2400 * of last header" is "next header" field in Fragment header. In this case,
2401 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2402 * isn't NULL.
 */
2405 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2406 int target, unsigned short *fragoff)
2408 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2409 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2410 unsigned int len = skb->len - start;
2412 if (fragoff)
2413 *fragoff = 0;
/* walk the extension-header chain until @target (or the last header
 * when target < 0) is reached */
2415 while (nexthdr != target) {
2416 struct ipv6_opt_hdr _hdr, *hp;
2417 unsigned int hdrlen;
2419 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2420 if (target < 0)
2421 break;
2422 return -ENOENT;
2425 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2426 if (hp == NULL)
2427 return -EBADMSG;
2428 if (nexthdr == NEXTHDR_FRAGMENT) {
2429 unsigned short _frag_off;
2430 __be16 *fp;
2431 fp = skb_header_pointer(skb,
2432 start+offsetof(struct frag_hdr,
2433 frag_off),
2434 sizeof(_frag_off),
2435 &_frag_off);
2436 if (fp == NULL)
2437 return -EBADMSG;
/* mask off the M (more-fragments) flag bits */
2439 _frag_off = ntohs(*fp) & ~0x7;
2440 if (_frag_off) {
/* non-first fragment: report the fragment header's next
 * protocol and the fragment offset instead */
2441 if (target < 0 &&
2442 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2443 hp->nexthdr == NEXTHDR_NONE)) {
2444 if (fragoff)
2445 *fragoff = _frag_off;
2446 return hp->nexthdr;
2448 return -ENOENT;
/* fragment header has fixed length, not ipv6_optlen() */
2450 hdrlen = 8;
2451 } else if (nexthdr == NEXTHDR_AUTH)
2452 hdrlen = (hp->hdrlen + 2) << 2;
2453 else
2454 hdrlen = ipv6_optlen(hp);
2456 nexthdr = hp->nexthdr;
2457 len -= hdrlen;
2458 start += hdrlen;
2461 *offset = start;
2462 return nexthdr;
2465 EXPORT_SYMBOL(ip6t_register_table);
2466 EXPORT_SYMBOL(ip6t_unregister_table);
2467 EXPORT_SYMBOL(ip6t_do_table);
2468 EXPORT_SYMBOL(ip6t_ext_hdr);
2469 EXPORT_SYMBOL(ipv6_find_hdr);
2471 module_init(ip6_tables_init);
2472 module_exit(ip6_tables_fini);