/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Check for an extension */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS)   ||
		 (nexthdr == IPPROTO_ROUTING)   ||
		 (nexthdr == IPPROTO_FRAGMENT)  ||
		 (nexthdr == IPPROTO_ESP)       ||
		 (nexthdr == IPPROTO_AH)        ||
		 (nexthdr == IPPROTO_NONE)      ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
/* should be ip6 safe */
static bool
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
		return false;
	}
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip6_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical - called for every packet */
static inline bool
do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}

static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IP6T_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV6;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		struct ip6t_entry_target *t;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
		    IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
			e = ip6t_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ip6t_entry *next = ip6t_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);

#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	tb_comefrom = NETFILTER_LINK_POISON;
#endif
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static int
cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV6;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}

static int
check_entry(struct ip6t_entry *e, const char *name)
{
	struct ip6t_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
		       unsigned int *i)
{
	const struct ip6t_ip6 *ipv6 = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}
646 static int
647 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
648 unsigned int *i)
650 struct xt_match *match;
651 int ret;
653 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
654 m->u.user.revision),
655 "ip6t_%s", m->u.user.name);
656 if (IS_ERR(match) || !match) {
657 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
658 return match ? PTR_ERR(match) : -ENOENT;
660 m->u.kernel.match = match;
662 ret = check_match(m, par, i);
663 if (ret)
664 goto err;
666 return 0;
667 err:
668 module_put(m->u.kernel.match->me);
669 return ret;
672 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
674 struct ip6t_entry_target *t = ip6t_get_target(e);
675 struct xt_tgchk_param par = {
676 .net = net,
677 .table = name,
678 .entryinfo = e,
679 .target = t->u.kernel.target,
680 .targinfo = t->data,
681 .hook_mask = e->comefrom,
682 .family = NFPROTO_IPV6,
684 int ret;
686 t = ip6t_get_target(e);
687 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
688 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
689 if (ret < 0) {
690 duprintf("ip_tables: check failed for `%s'.\n",
691 t->u.kernel.target->name);
692 return ret;
694 return 0;
697 static int
698 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
699 unsigned int size, unsigned int *i)
701 struct ip6t_entry_target *t;
702 struct xt_target *target;
703 int ret;
704 unsigned int j;
705 struct xt_mtchk_param mtpar;
707 ret = check_entry(e, name);
708 if (ret)
709 return ret;
711 j = 0;
712 mtpar.net = net;
713 mtpar.table = name;
714 mtpar.entryinfo = &e->ipv6;
715 mtpar.hook_mask = e->comefrom;
716 mtpar.family = NFPROTO_IPV6;
717 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
718 if (ret != 0)
719 goto cleanup_matches;
721 t = ip6t_get_target(e);
722 target = try_then_request_module(xt_find_target(AF_INET6,
723 t->u.user.name,
724 t->u.user.revision),
725 "ip6t_%s", t->u.user.name);
726 if (IS_ERR(target) || !target) {
727 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
728 ret = target ? PTR_ERR(target) : -ENOENT;
729 goto cleanup_matches;
731 t->u.kernel.target = target;
733 ret = check_target(e, net, name);
734 if (ret)
735 goto err;
737 (*i)++;
738 return 0;
739 err:
740 module_put(t->u.kernel.target->me);
741 cleanup_matches:
742 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
743 return ret;
746 static bool check_underflow(struct ip6t_entry *e)
748 const struct ip6t_entry_target *t;
749 unsigned int verdict;
751 if (!unconditional(&e->ipv6))
752 return false;
753 t = ip6t_get_target(e);
754 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
755 return false;
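	/* Standard verdicts are stored as -(NF_verdict) - 1; undo that
	 * encoding before comparing against NF_DROP / NF_ACCEPT. */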
756 verdict = ((struct ip6t_standard_target *)t)->verdict;
757 verdict = -verdict - 1;
758 return verdict == NF_DROP || verdict == NF_ACCEPT;
761 static int
762 check_entry_size_and_hooks(struct ip6t_entry *e,
763 struct xt_table_info *newinfo,
764 unsigned char *base,
765 unsigned char *limit,
766 const unsigned int *hook_entries,
767 const unsigned int *underflows,
768 unsigned int valid_hooks,
769 unsigned int *i)
771 unsigned int h;
773 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
774 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
775 duprintf("Bad offset %p\n", e);
776 return -EINVAL;
779 if (e->next_offset
780 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
781 duprintf("checking: element %p size %u\n",
782 e, e->next_offset);
783 return -EINVAL;
786 /* Check hooks & underflows */
787 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
788 if (!(valid_hooks & (1 << h)))
789 continue;
790 if ((unsigned char *)e - base == hook_entries[h])
791 newinfo->hook_entry[h] = hook_entries[h];
792 if ((unsigned char *)e - base == underflows[h]) {
793 if (!check_underflow(e)) {
794 pr_err("Underflows must be unconditional and "
795 "use the STANDARD target with "
796 "ACCEPT/DROP\n");
797 return -EINVAL;
799 newinfo->underflow[h] = underflows[h];
803 /* Clear counters and comefrom */
804 e->counters = ((struct xt_counters) { 0, 0 });
805 e->comefrom = 0;
807 (*i)++;
808 return 0;
811 static int
812 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
814 struct xt_tgdtor_param par;
815 struct ip6t_entry_target *t;
817 if (i && (*i)-- == 0)
818 return 1;
820 /* Cleanup all matches */
821 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
822 t = ip6t_get_target(e);
824 par.net = net;
825 par.target = t->u.kernel.target;
826 par.targinfo = t->data;
827 par.family = NFPROTO_IPV6;
828 if (par.target->destroy != NULL)
829 par.target->destroy(&par);
830 module_put(par.target->me);
831 return 0;
834 /* Checks and translates the user-supplied table segment (held in
835 newinfo) */
836 static int
837 translate_table(struct net *net,
838 const char *name,
839 unsigned int valid_hooks,
840 struct xt_table_info *newinfo,
841 void *entry0,
842 unsigned int size,
843 unsigned int number,
844 const unsigned int *hook_entries,
845 const unsigned int *underflows)
847 unsigned int i;
848 int ret;
850 newinfo->size = size;
851 newinfo->number = number;
853 /* Init all hooks to impossible value. */
854 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
855 newinfo->hook_entry[i] = 0xFFFFFFFF;
856 newinfo->underflow[i] = 0xFFFFFFFF;
859 duprintf("translate_table: size %u\n", newinfo->size);
860 i = 0;
861 /* Walk through entries, checking offsets. */
862 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
863 check_entry_size_and_hooks,
864 newinfo,
865 entry0,
866 entry0 + size,
867 hook_entries, underflows, valid_hooks, &i);
868 if (ret != 0)
869 return ret;
871 if (i != number) {
872 duprintf("translate_table: %u not %u entries\n",
873 i, number);
874 return -EINVAL;
877 /* Check hooks all assigned */
878 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
879 /* Only hooks which are valid */
880 if (!(valid_hooks & (1 << i)))
881 continue;
882 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
883 duprintf("Invalid hook entry %u %u\n",
884 i, hook_entries[i]);
885 return -EINVAL;
887 if (newinfo->underflow[i] == 0xFFFFFFFF) {
888 duprintf("Invalid underflow %u %u\n",
889 i, underflows[i]);
890 return -EINVAL;
894 if (!mark_source_chains(newinfo, valid_hooks, entry0))
895 return -ELOOP;
897 /* Finally, each sanity check must pass */
898 i = 0;
899 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
900 find_check_entry, net, name, size, &i);
902 if (ret != 0) {
903 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
904 cleanup_entry, net, &i);
905 return ret;
908 /* And one copy for every other CPU */
909 for_each_possible_cpu(i) {
910 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
911 memcpy(newinfo->entries[i], entry0, newinfo->size);
914 return ret;
917 /* Gets counters. */
918 static inline int
919 add_entry_to_counter(const struct ip6t_entry *e,
920 struct xt_counters total[],
921 unsigned int *i)
923 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
925 (*i)++;
926 return 0;
929 static inline int
930 set_entry_to_counter(const struct ip6t_entry *e,
931 struct ip6t_counters total[],
932 unsigned int *i)
934 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
936 (*i)++;
937 return 0;
940 static void
941 get_counters(const struct xt_table_info *t,
942 struct xt_counters counters[])
944 unsigned int cpu;
945 unsigned int i;
946 unsigned int curcpu;
948 /* Instead of clearing (by a previous call to memset())
949 * the counters and using adds, we set the counters
950 * with data used by 'current' CPU
952 * Bottom half has to be disabled to prevent deadlock
953 * if new softirq were to run and call ipt_do_table
955 local_bh_disable();
956 curcpu = smp_processor_id();
958 i = 0;
959 IP6T_ENTRY_ITERATE(t->entries[curcpu],
960 t->size,
961 set_entry_to_counter,
962 counters,
963 &i);
965 for_each_possible_cpu(cpu) {
966 if (cpu == curcpu)
967 continue;
968 i = 0;
969 xt_info_wrlock(cpu);
970 IP6T_ENTRY_ITERATE(t->entries[cpu],
971 t->size,
972 add_entry_to_counter,
973 counters,
974 &i);
975 xt_info_wrunlock(cpu);
977 local_bh_enable();
980 static struct xt_counters *alloc_counters(struct xt_table *table)
982 unsigned int countersize;
983 struct xt_counters *counters;
984 struct xt_table_info *private = table->private;
986 /* We need atomic snapshot of counters: rest doesn't change
987 (other than comefrom, which userspace doesn't care
988 about). */
989 countersize = sizeof(struct xt_counters) * private->number;
990 counters = vmalloc_node(countersize, numa_node_id());
992 if (counters == NULL)
993 return ERR_PTR(-ENOMEM);
995 get_counters(private, counters);
997 return counters;
1000 static int
1001 copy_entries_to_user(unsigned int total_size,
1002 struct xt_table *table,
1003 void __user *userptr)
1005 unsigned int off, num;
1006 struct ip6t_entry *e;
1007 struct xt_counters *counters;
1008 const struct xt_table_info *private = table->private;
1009 int ret = 0;
1010 const void *loc_cpu_entry;
1012 counters = alloc_counters(table);
1013 if (IS_ERR(counters))
1014 return PTR_ERR(counters);
1016 /* choose the copy that is on our node/cpu, ...
1017 * This choice is lazy (because current thread is
1018 * allowed to migrate to another cpu)
1020 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1021 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1022 ret = -EFAULT;
1023 goto free_counters;
1026 /* FIXME: use iterator macros --RR */
1027 /* ... then go back and fix counters and names */
1028 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1029 unsigned int i;
1030 const struct ip6t_entry_match *m;
1031 const struct ip6t_entry_target *t;
1033 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1034 if (copy_to_user(userptr + off
1035 + offsetof(struct ip6t_entry, counters),
1036 &counters[num],
1037 sizeof(counters[num])) != 0) {
1038 ret = -EFAULT;
1039 goto free_counters;
1042 for (i = sizeof(struct ip6t_entry);
1043 i < e->target_offset;
1044 i += m->u.match_size) {
1045 m = (void *)e + i;
1047 if (copy_to_user(userptr + off + i
1048 + offsetof(struct ip6t_entry_match,
1049 u.user.name),
1050 m->u.kernel.match->name,
1051 strlen(m->u.kernel.match->name)+1)
1052 != 0) {
1053 ret = -EFAULT;
1054 goto free_counters;
1058 t = ip6t_get_target(e);
1059 if (copy_to_user(userptr + off + e->target_offset
1060 + offsetof(struct ip6t_entry_target,
1061 u.user.name),
1062 t->u.kernel.target->name,
1063 strlen(t->u.kernel.target->name)+1) != 0) {
1064 ret = -EFAULT;
1065 goto free_counters;
1069 free_counters:
1070 vfree(counters);
1071 return ret;
1074 #ifdef CONFIG_COMPAT
1075 static void compat_standard_from_user(void *dst, void *src)
1077 int v = *(compat_int_t *)src;
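	/* A positive standard verdict is a jump offset into the entry blob;
	 * xt_compat_calc_jump() gives the amount that offset grows when the
	 * compat (32-bit) layout is expanded to the native one. */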
1079 if (v > 0)
1080 v += xt_compat_calc_jump(AF_INET6, v);
1081 memcpy(dst, &v, sizeof(v));
1084 static int compat_standard_to_user(void __user *dst, void *src)
1086 compat_int_t cv = *(int *)src;
1088 if (cv > 0)
1089 cv -= xt_compat_calc_jump(AF_INET6, cv);
1090 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1093 static inline int
1094 compat_calc_match(struct ip6t_entry_match *m, int *size)
1096 *size += xt_compat_match_offset(m->u.kernel.match);
1097 return 0;
1100 static int compat_calc_entry(struct ip6t_entry *e,
1101 const struct xt_table_info *info,
1102 void *base, struct xt_table_info *newinfo)
1104 struct ip6t_entry_target *t;
1105 unsigned int entry_offset;
1106 int off, i, ret;
1108 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1109 entry_offset = (void *)e - base;
1110 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1111 t = ip6t_get_target(e);
1112 off += xt_compat_target_offset(t->u.kernel.target);
1113 newinfo->size -= off;
1114 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1115 if (ret)
1116 return ret;
1118 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1119 if (info->hook_entry[i] &&
1120 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1121 newinfo->hook_entry[i] -= off;
1122 if (info->underflow[i] &&
1123 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1124 newinfo->underflow[i] -= off;
1126 return 0;
1129 static int compat_table_info(const struct xt_table_info *info,
1130 struct xt_table_info *newinfo)
1132 void *loc_cpu_entry;
1134 if (!newinfo || !info)
1135 return -EINVAL;
1137 /* we dont care about newinfo->entries[] */
1138 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1139 newinfo->initial_entries = 0;
1140 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1141 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1142 compat_calc_entry, info, loc_cpu_entry,
1143 newinfo);
1145 #endif
1147 static int get_info(struct net *net, void __user *user, int *len, int compat)
1149 char name[IP6T_TABLE_MAXNAMELEN];
1150 struct xt_table *t;
1151 int ret;
1153 if (*len != sizeof(struct ip6t_getinfo)) {
1154 duprintf("length %u != %zu\n", *len,
1155 sizeof(struct ip6t_getinfo));
1156 return -EINVAL;
1159 if (copy_from_user(name, user, sizeof(name)) != 0)
1160 return -EFAULT;
1162 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1163 #ifdef CONFIG_COMPAT
1164 if (compat)
1165 xt_compat_lock(AF_INET6);
1166 #endif
1167 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1168 "ip6table_%s", name);
1169 if (t && !IS_ERR(t)) {
1170 struct ip6t_getinfo info;
1171 const struct xt_table_info *private = t->private;
1172 #ifdef CONFIG_COMPAT
1173 struct xt_table_info tmp;
1175 if (compat) {
1176 ret = compat_table_info(private, &tmp);
1177 xt_compat_flush_offsets(AF_INET6);
1178 private = &tmp;
1180 #endif
1181 info.valid_hooks = t->valid_hooks;
1182 memcpy(info.hook_entry, private->hook_entry,
1183 sizeof(info.hook_entry));
1184 memcpy(info.underflow, private->underflow,
1185 sizeof(info.underflow));
1186 info.num_entries = private->number;
1187 info.size = private->size;
1188 strcpy(info.name, name);
1190 if (copy_to_user(user, &info, *len) != 0)
1191 ret = -EFAULT;
1192 else
1193 ret = 0;
1195 xt_table_unlock(t);
1196 module_put(t->me);
1197 } else
1198 ret = t ? PTR_ERR(t) : -ENOENT;
1199 #ifdef CONFIG_COMPAT
1200 if (compat)
1201 xt_compat_unlock(AF_INET6);
1202 #endif
1203 return ret;
1206 static int
1207 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1209 int ret;
1210 struct ip6t_get_entries get;
1211 struct xt_table *t;
1213 if (*len < sizeof(get)) {
1214 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1215 return -EINVAL;
1217 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1218 return -EFAULT;
1219 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1220 duprintf("get_entries: %u != %zu\n",
1221 *len, sizeof(get) + get.size);
1222 return -EINVAL;
1225 t = xt_find_table_lock(net, AF_INET6, get.name);
1226 if (t && !IS_ERR(t)) {
1227 struct xt_table_info *private = t->private;
1228 duprintf("t->private->number = %u\n", private->number);
1229 if (get.size == private->size)
1230 ret = copy_entries_to_user(private->size,
1231 t, uptr->entrytable);
1232 else {
1233 duprintf("get_entries: I've got %u not %u!\n",
1234 private->size, get.size);
1235 ret = -EAGAIN;
1237 module_put(t->me);
1238 xt_table_unlock(t);
1239 } else
1240 ret = t ? PTR_ERR(t) : -ENOENT;
1242 return ret;
1245 static int
1246 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *newinfo, unsigned int num_counters,
1248 void __user *counters_ptr)
1250 int ret;
1251 struct xt_table *t;
1252 struct xt_table_info *oldinfo;
1253 struct xt_counters *counters;
1254 const void *loc_cpu_old_entry;
1256 ret = 0;
1257 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1258 numa_node_id());
1259 if (!counters) {
1260 ret = -ENOMEM;
1261 goto out;
1264 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1265 "ip6table_%s", name);
1266 if (!t || IS_ERR(t)) {
1267 ret = t ? PTR_ERR(t) : -ENOENT;
1268 goto free_newinfo_counters_untrans;
1271 /* You lied! */
1272 if (valid_hooks != t->valid_hooks) {
1273 duprintf("Valid hook crap: %08X vs %08X\n",
1274 valid_hooks, t->valid_hooks);
1275 ret = -EINVAL;
1276 goto put_module;
1279 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1280 if (!oldinfo)
1281 goto put_module;
1283 /* Update module usage count based on number of rules */
1284 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1285 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1286 if ((oldinfo->number > oldinfo->initial_entries) ||
1287 (newinfo->number <= oldinfo->initial_entries))
1288 module_put(t->me);
1289 if ((oldinfo->number > oldinfo->initial_entries) &&
1290 (newinfo->number <= oldinfo->initial_entries))
1291 module_put(t->me);
1293 /* Get the old counters, and synchronize with replace */
1294 get_counters(oldinfo, counters);
1296 /* Decrease module usage counts and free resource */
1297 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1298 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1299 net, NULL);
1300 xt_free_table_info(oldinfo);
1301 if (copy_to_user(counters_ptr, counters,
1302 sizeof(struct xt_counters) * num_counters) != 0)
1303 ret = -EFAULT;
1304 vfree(counters);
1305 xt_table_unlock(t);
1306 return ret;
1308 put_module:
1309 module_put(t->me);
1310 xt_table_unlock(t);
1311 free_newinfo_counters_untrans:
1312 vfree(counters);
1313 out:
1314 return ret;
1317 static int
1318 do_replace(struct net *net, void __user *user, unsigned int len)
1320 int ret;
1321 struct ip6t_replace tmp;
1322 struct xt_table_info *newinfo;
1323 void *loc_cpu_entry;
1325 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1326 return -EFAULT;
1328 /* overflow check */
1329 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1330 return -ENOMEM;
1332 newinfo = xt_alloc_table_info(tmp.size);
1333 if (!newinfo)
1334 return -ENOMEM;
1336 /* choose the copy that is on our node/cpu */
1337 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1338 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1339 tmp.size) != 0) {
1340 ret = -EFAULT;
1341 goto free_newinfo;
1344 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1345 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1346 tmp.hook_entry, tmp.underflow);
1347 if (ret != 0)
1348 goto free_newinfo;
1350 duprintf("ip_tables: Translated table\n");
1352 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1353 tmp.num_counters, tmp.counters);
1354 if (ret)
1355 goto free_newinfo_untrans;
1356 return 0;
1358 free_newinfo_untrans:
1359 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1360 free_newinfo:
1361 xt_free_table_info(newinfo);
1362 return ret;
1365 /* We're lazy, and add to the first CPU; overflow works its fey magic
1366 * and everything is OK. */
1367 static int
1368 add_counter_to_entry(struct ip6t_entry *e,
1369 const struct xt_counters addme[],
1370 unsigned int *i)
1372 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1374 (*i)++;
1375 return 0;
1378 static int
1379 do_add_counters(struct net *net, void __user *user, unsigned int len,
1380 int compat)
1382 unsigned int i, curcpu;
1383 struct xt_counters_info tmp;
1384 struct xt_counters *paddc;
1385 unsigned int num_counters;
1386 char *name;
1387 int size;
1388 void *ptmp;
1389 struct xt_table *t;
1390 const struct xt_table_info *private;
1391 int ret = 0;
1392 const void *loc_cpu_entry;
1393 #ifdef CONFIG_COMPAT
1394 struct compat_xt_counters_info compat_tmp;
1396 if (compat) {
1397 ptmp = &compat_tmp;
1398 size = sizeof(struct compat_xt_counters_info);
1399 } else
1400 #endif
1402 ptmp = &tmp;
1403 size = sizeof(struct xt_counters_info);
1406 if (copy_from_user(ptmp, user, size) != 0)
1407 return -EFAULT;
1409 #ifdef CONFIG_COMPAT
1410 if (compat) {
1411 num_counters = compat_tmp.num_counters;
1412 name = compat_tmp.name;
1413 } else
1414 #endif
1416 num_counters = tmp.num_counters;
1417 name = tmp.name;
1420 if (len != size + num_counters * sizeof(struct xt_counters))
1421 return -EINVAL;
1423 paddc = vmalloc_node(len - size, numa_node_id());
1424 if (!paddc)
1425 return -ENOMEM;
1427 if (copy_from_user(paddc, user + size, len - size) != 0) {
1428 ret = -EFAULT;
1429 goto free;
1432 t = xt_find_table_lock(net, AF_INET6, name);
1433 if (!t || IS_ERR(t)) {
1434 ret = t ? PTR_ERR(t) : -ENOENT;
1435 goto free;
1439 local_bh_disable();
1440 private = t->private;
1441 if (private->number != num_counters) {
1442 ret = -EINVAL;
1443 goto unlock_up_free;
1446 i = 0;
1447 /* Choose the copy that is on our node */
1448 curcpu = smp_processor_id();
1449 xt_info_wrlock(curcpu);
1450 loc_cpu_entry = private->entries[curcpu];
1451 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1452 private->size,
1453 add_counter_to_entry,
1454 paddc,
1455 &i);
1456 xt_info_wrunlock(curcpu);
1458 unlock_up_free:
1459 local_bh_enable();
1460 xt_table_unlock(t);
1461 module_put(t->me);
1462 free:
1463 vfree(paddc);
1465 return ret;
1468 #ifdef CONFIG_COMPAT
1469 struct compat_ip6t_replace {
1470 char name[IP6T_TABLE_MAXNAMELEN];
1471 u32 valid_hooks;
1472 u32 num_entries;
1473 u32 size;
1474 u32 hook_entry[NF_INET_NUMHOOKS];
1475 u32 underflow[NF_INET_NUMHOOKS];
1476 u32 num_counters;
1477 compat_uptr_t counters; /* struct ip6t_counters * */
1478 struct compat_ip6t_entry entries[0];
1481 static int
1482 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1483 unsigned int *size, struct xt_counters *counters,
1484 unsigned int *i)
1486 struct ip6t_entry_target *t;
1487 struct compat_ip6t_entry __user *ce;
1488 u_int16_t target_offset, next_offset;
1489 compat_uint_t origsize;
1490 int ret;
1492 ret = -EFAULT;
1493 origsize = *size;
1494 ce = (struct compat_ip6t_entry __user *)*dstptr;
1495 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1496 goto out;
1498 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1499 goto out;
1501 *dstptr += sizeof(struct compat_ip6t_entry);
1502 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1504 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
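	/* Compat entries and matches are smaller than the native ones, so
	 * offsets inside the entry shrink by the bytes saved so far
	 * (origsize - *size). */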
1505 target_offset = e->target_offset - (origsize - *size);
1506 if (ret)
1507 goto out;
1508 t = ip6t_get_target(e);
1509 ret = xt_compat_target_to_user(t, dstptr, size);
1510 if (ret)
1511 goto out;
1512 ret = -EFAULT;
1513 next_offset = e->next_offset - (origsize - *size);
1514 if (put_user(target_offset, &ce->target_offset))
1515 goto out;
1516 if (put_user(next_offset, &ce->next_offset))
1517 goto out;
1519 (*i)++;
1520 return 0;
1521 out:
1522 return ret;
1525 static int
1526 compat_find_calc_match(struct ip6t_entry_match *m,
1527 const char *name,
1528 const struct ip6t_ip6 *ipv6,
1529 unsigned int hookmask,
1530 int *size, unsigned int *i)
1532 struct xt_match *match;
1534 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1535 m->u.user.revision),
1536 "ip6t_%s", m->u.user.name);
1537 if (IS_ERR(match) || !match) {
1538 duprintf("compat_check_calc_match: `%s' not found\n",
1539 m->u.user.name);
1540 return match ? PTR_ERR(match) : -ENOENT;
1542 m->u.kernel.match = match;
1543 *size += xt_compat_match_offset(match);
1545 (*i)++;
1546 return 0;
1549 static int
1550 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1552 if (i && (*i)-- == 0)
1553 return 1;
1555 module_put(m->u.kernel.match->me);
1556 return 0;
1559 static int
1560 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1562 struct ip6t_entry_target *t;
1564 if (i && (*i)-- == 0)
1565 return 1;
1567 /* Cleanup all matches */
1568 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1569 t = compat_ip6t_get_target(e);
1570 module_put(t->u.kernel.target->me);
1571 return 0;
1574 static int
1575 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1576 struct xt_table_info *newinfo,
1577 unsigned int *size,
1578 unsigned char *base,
1579 unsigned char *limit,
1580 unsigned int *hook_entries,
1581 unsigned int *underflows,
1582 unsigned int *i,
1583 const char *name)
1585 struct ip6t_entry_target *t;
1586 struct xt_target *target;
1587 unsigned int entry_offset;
1588 unsigned int j;
1589 int ret, off, h;
1591 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1592 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1593 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1594 duprintf("Bad offset %p, limit = %p\n", e, limit);
1595 return -EINVAL;
1598 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1599 sizeof(struct compat_xt_entry_target)) {
1600 duprintf("checking: element %p size %u\n",
1601 e, e->next_offset);
1602 return -EINVAL;
1605 /* For purposes of check_entry casting the compat entry is fine */
1606 ret = check_entry((struct ip6t_entry *)e, name);
1607 if (ret)
1608 return ret;
1610 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1611 entry_offset = (void *)e - (void *)base;
1612 j = 0;
1613 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1614 &e->ipv6, e->comefrom, &off, &j);
1615 if (ret != 0)
1616 goto release_matches;
1618 t = compat_ip6t_get_target(e);
1619 target = try_then_request_module(xt_find_target(AF_INET6,
1620 t->u.user.name,
1621 t->u.user.revision),
1622 "ip6t_%s", t->u.user.name);
1623 if (IS_ERR(target) || !target) {
1624 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1625 t->u.user.name);
1626 ret = target ? PTR_ERR(target) : -ENOENT;
1627 goto release_matches;
1629 t->u.kernel.target = target;
1631 off += xt_compat_target_offset(target);
1632 *size += off;
1633 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1634 if (ret)
1635 goto out;
1637 /* Check hooks & underflows */
1638 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1639 if ((unsigned char *)e - base == hook_entries[h])
1640 newinfo->hook_entry[h] = hook_entries[h];
1641 if ((unsigned char *)e - base == underflows[h])
1642 newinfo->underflow[h] = underflows[h];
1645 /* Clear counters and comefrom */
1646 memset(&e->counters, 0, sizeof(e->counters));
1647 e->comefrom = 0;
1649 (*i)++;
1650 return 0;
1652 out:
1653 module_put(t->u.kernel.target->me);
1654 release_matches:
1655 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1656 return ret;
1659 static int
1660 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1661 unsigned int *size, const char *name,
1662 struct xt_table_info *newinfo, unsigned char *base)
1664 struct ip6t_entry_target *t;
1665 struct xt_target *target;
1666 struct ip6t_entry *de;
1667 unsigned int origsize;
1668 int ret, h;
1670 ret = 0;
1671 origsize = *size;
1672 de = (struct ip6t_entry *)*dstptr;
1673 memcpy(de, e, sizeof(struct ip6t_entry));
1674 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1676 *dstptr += sizeof(struct ip6t_entry);
1677 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1679 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1680 dstptr, size);
1681 if (ret)
1682 return ret;
1683 de->target_offset = e->target_offset - (origsize - *size);
1684 t = compat_ip6t_get_target(e);
1685 target = t->u.kernel.target;
1686 xt_compat_target_from_user(t, dstptr, size);
1688 de->next_offset = e->next_offset - (origsize - *size);
1689 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1690 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1691 newinfo->hook_entry[h] -= origsize - *size;
1692 if ((unsigned char *)de - base < newinfo->underflow[h])
1693 newinfo->underflow[h] -= origsize - *size;
1695 return ret;
1698 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1699 const char *name, unsigned int *i)
1701 unsigned int j;
1702 int ret;
1703 struct xt_mtchk_param mtpar;
1705 j = 0;
1706 mtpar.net = net;
1707 mtpar.table = name;
1708 mtpar.entryinfo = &e->ipv6;
1709 mtpar.hook_mask = e->comefrom;
1710 mtpar.family = NFPROTO_IPV6;
1711 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1712 if (ret)
1713 goto cleanup_matches;
1715 ret = check_target(e, net, name);
1716 if (ret)
1717 goto cleanup_matches;
1719 (*i)++;
1720 return 0;
1722 cleanup_matches:
1723 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
1724 return ret;
1727 static int
1728 translate_compat_table(struct net *net,
1729 const char *name,
1730 unsigned int valid_hooks,
1731 struct xt_table_info **pinfo,
1732 void **pentry0,
1733 unsigned int total_size,
1734 unsigned int number,
1735 unsigned int *hook_entries,
1736 unsigned int *underflows)
1738 unsigned int i, j;
1739 struct xt_table_info *newinfo, *info;
1740 void *pos, *entry0, *entry1;
1741 unsigned int size;
1742 int ret;
1744 info = *pinfo;
1745 entry0 = *pentry0;
1746 size = total_size;
1747 info->number = number;
1749 /* Init all hooks to impossible value. */
1750 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1751 info->hook_entry[i] = 0xFFFFFFFF;
1752 info->underflow[i] = 0xFFFFFFFF;
1755 duprintf("translate_compat_table: size %u\n", info->size);
1756 j = 0;
1757 xt_compat_lock(AF_INET6);
1758 /* Walk through entries, checking offsets. */
1759 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1760 check_compat_entry_size_and_hooks,
1761 info, &size, entry0,
1762 entry0 + total_size,
1763 hook_entries, underflows, &j, name);
1764 if (ret != 0)
1765 goto out_unlock;
1767 ret = -EINVAL;
1768 if (j != number) {
1769 duprintf("translate_compat_table: %u not %u entries\n",
1770 j, number);
1771 goto out_unlock;
1774 /* Check hooks all assigned */
1775 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1776 /* Only hooks which are valid */
1777 if (!(valid_hooks & (1 << i)))
1778 continue;
1779 if (info->hook_entry[i] == 0xFFFFFFFF) {
1780 duprintf("Invalid hook entry %u %u\n",
1781 i, hook_entries[i]);
1782 goto out_unlock;
1784 if (info->underflow[i] == 0xFFFFFFFF) {
1785 duprintf("Invalid underflow %u %u\n",
1786 i, underflows[i]);
1787 goto out_unlock;
1791 ret = -ENOMEM;
1792 newinfo = xt_alloc_table_info(size);
1793 if (!newinfo)
1794 goto out_unlock;
1796 newinfo->number = number;
1797 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1798 newinfo->hook_entry[i] = info->hook_entry[i];
1799 newinfo->underflow[i] = info->underflow[i];
1801 entry1 = newinfo->entries[raw_smp_processor_id()];
1802 pos = entry1;
1803 size = total_size;
1804 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1805 compat_copy_entry_from_user,
1806 &pos, &size, name, newinfo, entry1);
1807 xt_compat_flush_offsets(AF_INET6);
1808 xt_compat_unlock(AF_INET6);
1809 if (ret)
1810 goto free_newinfo;
1812 ret = -ELOOP;
1813 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1814 goto free_newinfo;
1816 i = 0;
1817 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1818 net, name, &i);
1819 if (ret) {
1820 j -= i;
1821 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1822 compat_release_entry, &j);
1823 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1824 xt_free_table_info(newinfo);
1825 return ret;
1828 /* And one copy for every other CPU */
1829 for_each_possible_cpu(i)
1830 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1831 memcpy(newinfo->entries[i], entry1, newinfo->size);
1833 *pinfo = newinfo;
1834 *pentry0 = entry1;
1835 xt_free_table_info(info);
1836 return 0;
1838 free_newinfo:
1839 xt_free_table_info(newinfo);
1840 out:
1841 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1842 return ret;
1843 out_unlock:
1844 xt_compat_flush_offsets(AF_INET6);
1845 xt_compat_unlock(AF_INET6);
1846 goto out;
1849 static int
1850 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1852 int ret;
1853 struct compat_ip6t_replace tmp;
1854 struct xt_table_info *newinfo;
1855 void *loc_cpu_entry;
1857 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1858 return -EFAULT;
1860 /* overflow check */
1861 if (tmp.size >= INT_MAX / num_possible_cpus())
1862 return -ENOMEM;
1863 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1864 return -ENOMEM;
1866 newinfo = xt_alloc_table_info(tmp.size);
1867 if (!newinfo)
1868 return -ENOMEM;
1870 /* choose the copy that is on our node/cpu */
1871 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1872 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1873 tmp.size) != 0) {
1874 ret = -EFAULT;
1875 goto free_newinfo;
1878 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1879 &newinfo, &loc_cpu_entry, tmp.size,
1880 tmp.num_entries, tmp.hook_entry,
1881 tmp.underflow);
1882 if (ret != 0)
1883 goto free_newinfo;
1885 duprintf("compat_do_replace: Translated table\n");
1887 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1888 tmp.num_counters, compat_ptr(tmp.counters));
1889 if (ret)
1890 goto free_newinfo_untrans;
1891 return 0;
1893 free_newinfo_untrans:
1894 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1895 free_newinfo:
1896 xt_free_table_info(newinfo);
1897 return ret;
1900 static int
1901 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1902 unsigned int len)
1904 int ret;
1906 if (!capable(CAP_NET_ADMIN))
1907 return -EPERM;
1909 switch (cmd) {
1910 case IP6T_SO_SET_REPLACE:
1911 ret = compat_do_replace(sock_net(sk), user, len);
1912 break;
1914 case IP6T_SO_SET_ADD_COUNTERS:
1915 ret = do_add_counters(sock_net(sk), user, len, 1);
1916 break;
1918 default:
1919 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1920 ret = -EINVAL;
1923 return ret;
1926 struct compat_ip6t_get_entries {
1927 char name[IP6T_TABLE_MAXNAMELEN];
1928 compat_uint_t size;
1929 struct compat_ip6t_entry entrytable[0];
1932 static int
1933 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1934 void __user *userptr)
1936 struct xt_counters *counters;
1937 const struct xt_table_info *private = table->private;
1938 void __user *pos;
1939 unsigned int size;
1940 int ret = 0;
1941 const void *loc_cpu_entry;
1942 unsigned int i = 0;
1944 counters = alloc_counters(table);
1945 if (IS_ERR(counters))
1946 return PTR_ERR(counters);
1948 /* choose the copy that is on our node/cpu, ...
1949 * This choice is lazy (because current thread is
1950 * allowed to migrate to another cpu)
1952 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1953 pos = userptr;
1954 size = total_size;
1955 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1956 compat_copy_entry_to_user,
1957 &pos, &size, counters, &i);
1959 vfree(counters);
1960 return ret;
1963 static int
1964 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1965 int *len)
1967 int ret;
1968 struct compat_ip6t_get_entries get;
1969 struct xt_table *t;
1971 if (*len < sizeof(get)) {
1972 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1973 return -EINVAL;
1976 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1977 return -EFAULT;
1979 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1980 duprintf("compat_get_entries: %u != %zu\n",
1981 *len, sizeof(get) + get.size);
1982 return -EINVAL;
1985 xt_compat_lock(AF_INET6);
1986 t = xt_find_table_lock(net, AF_INET6, get.name);
1987 if (t && !IS_ERR(t)) {
1988 const struct xt_table_info *private = t->private;
1989 struct xt_table_info info;
1990 duprintf("t->private->number = %u\n", private->number);
1991 ret = compat_table_info(private, &info);
1992 if (!ret && get.size == info.size) {
1993 ret = compat_copy_entries_to_user(private->size,
1994 t, uptr->entrytable);
1995 } else if (!ret) {
1996 duprintf("compat_get_entries: I've got %u not %u!\n",
1997 private->size, get.size);
1998 ret = -EAGAIN;
2000 xt_compat_flush_offsets(AF_INET6);
2001 module_put(t->me);
2002 xt_table_unlock(t);
2003 } else
2004 ret = t ? PTR_ERR(t) : -ENOENT;
2006 xt_compat_unlock(AF_INET6);
2007 return ret;
2010 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2012 static int
2013 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2015 int ret;
2017 if (!capable(CAP_NET_ADMIN))
2018 return -EPERM;
2020 switch (cmd) {
2021 case IP6T_SO_GET_INFO:
2022 ret = get_info(sock_net(sk), user, len, 1);
2023 break;
2024 case IP6T_SO_GET_ENTRIES:
2025 ret = compat_get_entries(sock_net(sk), user, len);
2026 break;
2027 default:
2028 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2030 return ret;
2032 #endif
2034 static int
2035 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2037 int ret;
2039 if (!capable(CAP_NET_ADMIN))
2040 return -EPERM;
2042 switch (cmd) {
2043 case IP6T_SO_SET_REPLACE:
2044 ret = do_replace(sock_net(sk), user, len);
2045 break;
2047 case IP6T_SO_SET_ADD_COUNTERS:
2048 ret = do_add_counters(sock_net(sk), user, len, 0);
2049 break;
2051 default:
2052 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2053 ret = -EINVAL;
2056 return ret;
2059 static int
2060 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2062 int ret;
2064 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM;
2067 switch (cmd) {
2068 case IP6T_SO_GET_INFO:
2069 ret = get_info(sock_net(sk), user, len, 0);
2070 break;
2072 case IP6T_SO_GET_ENTRIES:
2073 ret = get_entries(sock_net(sk), user, len);
2074 break;
2076 case IP6T_SO_GET_REVISION_MATCH:
2077 case IP6T_SO_GET_REVISION_TARGET: {
2078 struct ip6t_get_revision rev;
2079 int target;
2081 if (*len != sizeof(rev)) {
2082 ret = -EINVAL;
2083 break;
2085 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2086 ret = -EFAULT;
2087 break;
2090 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2091 target = 1;
2092 else
2093 target = 0;
2095 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2096 rev.revision,
2097 target, &ret),
2098 "ip6t_%s", rev.name);
2099 break;
2102 default:
2103 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2104 ret = -EINVAL;
2107 return ret;
2110 struct xt_table *ip6t_register_table(struct net *net,
2111 const struct xt_table *table,
2112 const struct ip6t_replace *repl)
2114 int ret;
2115 struct xt_table_info *newinfo;
2116 struct xt_table_info bootstrap
2117 = { 0, 0, 0, { 0 }, { 0 }, { } };
2118 void *loc_cpu_entry;
2119 struct xt_table *new_table;
2121 newinfo = xt_alloc_table_info(repl->size);
2122 if (!newinfo) {
2123 ret = -ENOMEM;
2124 goto out;
2127 /* choose the copy on our node/cpu, but dont care about preemption */
2128 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2129 memcpy(loc_cpu_entry, repl->entries, repl->size);
2131 ret = translate_table(net, table->name, table->valid_hooks,
2132 newinfo, loc_cpu_entry, repl->size,
2133 repl->num_entries,
2134 repl->hook_entry,
2135 repl->underflow);
2136 if (ret != 0)
2137 goto out_free;
2139 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2140 if (IS_ERR(new_table)) {
2141 ret = PTR_ERR(new_table);
2142 goto out_free;
2144 return new_table;
2146 out_free:
2147 xt_free_table_info(newinfo);
2148 out:
2149 return ERR_PTR(ret);
2152 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2154 struct xt_table_info *private;
2155 void *loc_cpu_entry;
2156 struct module *table_owner = table->me;
2158 private = xt_unregister_table(table);
2160 /* Decrease module usage counts and free resources */
2161 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2162 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
2163 if (private->number > private->initial_entries)
2164 module_put(table_owner);
2165 xt_free_table_info(private);
2168 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2169 static inline bool
2170 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2171 u_int8_t type, u_int8_t code,
2172 bool invert)
2174 return (type == test_type && code >= min_code && code <= max_code)
2175 ^ invert;
2178 static bool
2179 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2181 const struct icmp6hdr *ic;
2182 struct icmp6hdr _icmph;
2183 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2185 /* Must not be a fragment. */
2186 if (par->fragoff != 0)
2187 return false;
2189 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2190 if (ic == NULL) {
2191 /* We've been asked to examine this packet, and we
2192 * can't. Hence, no choice but to drop.
2194 duprintf("Dropping evil ICMP tinygram.\n");
2195 *par->hotdrop = true;
2196 return false;
2199 return icmp6_type_code_match(icmpinfo->type,
2200 icmpinfo->code[0],
2201 icmpinfo->code[1],
2202 ic->icmp6_type, ic->icmp6_code,
2203 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2206 /* Called when user tries to insert an entry of this type. */
2207 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2209 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2211 /* Must specify no unknown invflags */
2212 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2215 /* The built-in targets: standard (NULL) and error. */
2216 static struct xt_target ip6t_standard_target __read_mostly = {
2217 .name = IP6T_STANDARD_TARGET,
2218 .targetsize = sizeof(int),
2219 .family = NFPROTO_IPV6,
2220 #ifdef CONFIG_COMPAT
2221 .compatsize = sizeof(compat_int_t),
2222 .compat_from_user = compat_standard_from_user,
2223 .compat_to_user = compat_standard_to_user,
2224 #endif
2227 static struct xt_target ip6t_error_target __read_mostly = {
2228 .name = IP6T_ERROR_TARGET,
2229 .target = ip6t_error,
2230 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2231 .family = NFPROTO_IPV6,
2234 static struct nf_sockopt_ops ip6t_sockopts = {
2235 .pf = PF_INET6,
2236 .set_optmin = IP6T_BASE_CTL,
2237 .set_optmax = IP6T_SO_SET_MAX+1,
2238 .set = do_ip6t_set_ctl,
2239 #ifdef CONFIG_COMPAT
2240 .compat_set = compat_do_ip6t_set_ctl,
2241 #endif
2242 .get_optmin = IP6T_BASE_CTL,
2243 .get_optmax = IP6T_SO_GET_MAX+1,
2244 .get = do_ip6t_get_ctl,
2245 #ifdef CONFIG_COMPAT
2246 .compat_get = compat_do_ip6t_get_ctl,
2247 #endif
2248 .owner = THIS_MODULE,
2251 static struct xt_match icmp6_matchstruct __read_mostly = {
2252 .name = "icmp6",
2253 .match = icmp6_match,
2254 .matchsize = sizeof(struct ip6t_icmp),
2255 .checkentry = icmp6_checkentry,
2256 .proto = IPPROTO_ICMPV6,
2257 .family = NFPROTO_IPV6,
2260 static int __net_init ip6_tables_net_init(struct net *net)
2262 return xt_proto_init(net, NFPROTO_IPV6);
2265 static void __net_exit ip6_tables_net_exit(struct net *net)
2267 xt_proto_fini(net, NFPROTO_IPV6);
2270 static struct pernet_operations ip6_tables_net_ops = {
2271 .init = ip6_tables_net_init,
2272 .exit = ip6_tables_net_exit,
2275 static int __init ip6_tables_init(void)
2277 int ret;
2279 ret = register_pernet_subsys(&ip6_tables_net_ops);
2280 if (ret < 0)
2281 goto err1;
2283 /* Noone else will be downing sem now, so we won't sleep */
2284 ret = xt_register_target(&ip6t_standard_target);
2285 if (ret < 0)
2286 goto err2;
2287 ret = xt_register_target(&ip6t_error_target);
2288 if (ret < 0)
2289 goto err3;
2290 ret = xt_register_match(&icmp6_matchstruct);
2291 if (ret < 0)
2292 goto err4;
2294 /* Register setsockopt */
2295 ret = nf_register_sockopt(&ip6t_sockopts);
2296 if (ret < 0)
2297 goto err5;
2299 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2300 return 0;
2302 err5:
2303 xt_unregister_match(&icmp6_matchstruct);
2304 err4:
2305 xt_unregister_target(&ip6t_error_target);
2306 err3:
2307 xt_unregister_target(&ip6t_standard_target);
2308 err2:
2309 unregister_pernet_subsys(&ip6_tables_net_ops);
2310 err1:
2311 return ret;
2314 static void __exit ip6_tables_fini(void)
2316 nf_unregister_sockopt(&ip6t_sockopts);
2318 xt_unregister_match(&icmp6_matchstruct);
2319 xt_unregister_target(&ip6t_error_target);
2320 xt_unregister_target(&ip6t_standard_target);
2322 unregister_pernet_subsys(&ip6_tables_net_ops);
/*
 * Find the offset to the specified header, or the protocol number of the
 * last header if target < 0.  "Last header" is a transport protocol header,
 * ESP, or "No next header".
 *
 * If the target header is found, its offset is set in *offset and its
 * protocol number is returned.  Otherwise, a negative error code is returned.
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that a non-first fragment is a special case: "the protocol number of
 * the last header" is the "next header" field in the Fragment header.  In
 * this case, *offset is meaningless and the fragment offset is stored in
 * *fragoff if fragoff isn't NULL.
 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}

EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);