netfilter: vmalloc_node cleanup
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / ipv6 / netfilter / ip6_tables.c
blob82945ef6c9fc44b8c9c1b75915213e3faa430914
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
68 return xt_alloc_initial_table(ip6t, IP6T);
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
73 We keep a set of rules for each CPU, so we can avoid write-locking
74 them in the softirq when updating the counters and therefore
75 only need to read-lock in the softirq; doing a write_lock_bh() in user
76 context stops packets coming through and allows user context to read
77 the counters or update the rules.
79 Hence the start of any table is given by get_table() below. */
/* Check for an extension: returns non-zero when @nexthdr names an IPv6
 * extension header (or ESP/AH/NONE), which may precede the upper-layer
 * protocol header. */
int
ip6t_ext_hdr(u8 nexthdr)
{
	switch (nexthdr) {
	case IPPROTO_HOPOPTS:
	case IPPROTO_ROUTING:
	case IPPROTO_FRAGMENT:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_NONE:
	case IPPROTO_DSTOPTS:
		return 1;
	default:
		return 0;
	}
}
94 /* Returns whether matches rule or not. */
95 /* Performance critical - called for every packet */
96 static inline bool
97 ip6_packet_match(const struct sk_buff *skb,
98 const char *indev,
99 const char *outdev,
100 const struct ip6t_ip6 *ip6info,
101 unsigned int *protoff,
102 int *fragoff, bool *hotdrop)
104 unsigned long ret;
105 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
107 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
109 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
110 &ip6info->src), IP6T_INV_SRCIP) ||
111 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
112 &ip6info->dst), IP6T_INV_DSTIP)) {
113 dprintf("Source or dest mismatch.\n");
115 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
116 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
117 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
118 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
119 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
120 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
121 return false;
124 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
126 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
127 dprintf("VIA in mismatch (%s vs %s).%s\n",
128 indev, ip6info->iniface,
129 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
130 return false;
133 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
135 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
136 dprintf("VIA out mismatch (%s vs %s).%s\n",
137 outdev, ip6info->outiface,
138 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
139 return false;
142 /* ... might want to do something with class and flowlabel here ... */
144 /* look for the desired protocol header */
145 if((ip6info->flags & IP6T_F_PROTO)) {
146 int protohdr;
147 unsigned short _frag_off;
149 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
150 if (protohdr < 0) {
151 if (_frag_off == 0)
152 *hotdrop = true;
153 return false;
155 *fragoff = _frag_off;
157 dprintf("Packet protocol %hi ?= %s%hi.\n",
158 protohdr,
159 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 ip6info->proto);
162 if (ip6info->proto == protohdr) {
163 if(ip6info->invflags & IP6T_INV_PROTO) {
164 return false;
166 return true;
169 /* We need match for the '-p all', too! */
170 if ((ip6info->proto != 0) &&
171 !(ip6info->invflags & IP6T_INV_PROTO))
172 return false;
174 return true;
177 /* should be ip6 safe */
178 static bool
179 ip6_checkentry(const struct ip6t_ip6 *ipv6)
181 if (ipv6->flags & ~IP6T_F_MASK) {
182 duprintf("Unknown flag bits set: %08X\n",
183 ipv6->flags & ~IP6T_F_MASK);
184 return false;
186 if (ipv6->invflags & ~IP6T_INV_MASK) {
187 duprintf("Unknown invflag bits set: %08X\n",
188 ipv6->invflags & ~IP6T_INV_MASK);
189 return false;
191 return true;
194 static unsigned int
195 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
197 if (net_ratelimit())
198 pr_info("error: `%s'\n", (const char *)par->targinfo);
200 return NF_DROP;
/* Translate a byte offset inside a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
209 /* All zeroes == unconditional rule. */
210 /* Mildly perf critical (only if packet tracing is on) */
211 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
213 static const struct ip6t_ip6 uncond;
215 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
224 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
225 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
226 /* This cries for unification! */
227 static const char *const hooknames[] = {
228 [NF_INET_PRE_ROUTING] = "PREROUTING",
229 [NF_INET_LOCAL_IN] = "INPUT",
230 [NF_INET_FORWARD] = "FORWARD",
231 [NF_INET_LOCAL_OUT] = "OUTPUT",
232 [NF_INET_POST_ROUTING] = "POSTROUTING",
235 enum nf_ip_trace_comments {
236 NF_IP6_TRACE_COMMENT_RULE,
237 NF_IP6_TRACE_COMMENT_RETURN,
238 NF_IP6_TRACE_COMMENT_POLICY,
241 static const char *const comments[] = {
242 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
243 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
244 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
247 static struct nf_loginfo trace_loginfo = {
248 .type = NF_LOG_TYPE_LOG,
249 .u = {
250 .log = {
251 .level = 4,
252 .logflags = NF_LOG_MASK,
257 /* Mildly perf critical (only if packet tracing is on) */
258 static inline int
259 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
260 const char *hookname, const char **chainname,
261 const char **comment, unsigned int *rulenum)
263 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
265 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
266 /* Head of user chain: ERROR target with chainname */
267 *chainname = t->target.data;
268 (*rulenum) = 0;
269 } else if (s == e) {
270 (*rulenum)++;
272 if (s->target_offset == sizeof(struct ip6t_entry) &&
273 strcmp(t->target.u.kernel.target->name,
274 IP6T_STANDARD_TARGET) == 0 &&
275 t->verdict < 0 &&
276 unconditional(&s->ipv6)) {
277 /* Tail of chains: STANDARD target (return/policy) */
278 *comment = *chainname == hookname
279 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
280 : comments[NF_IP6_TRACE_COMMENT_RETURN];
282 return 1;
283 } else
284 (*rulenum)++;
286 return 0;
289 static void trace_packet(const struct sk_buff *skb,
290 unsigned int hook,
291 const struct net_device *in,
292 const struct net_device *out,
293 const char *tablename,
294 const struct xt_table_info *private,
295 const struct ip6t_entry *e)
297 const void *table_base;
298 const struct ip6t_entry *root;
299 const char *hookname, *chainname, *comment;
300 const struct ip6t_entry *iter;
301 unsigned int rulenum = 0;
303 table_base = private->entries[smp_processor_id()];
304 root = get_entry(table_base, private->hook_entry[hook]);
306 hookname = chainname = hooknames[hook];
307 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
309 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
310 if (get_chainname_rulenum(iter, e, hookname,
311 &chainname, &comment, &rulenum) != 0)
312 break;
314 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
315 "TRACE: %s:%s:%s:%u ",
316 tablename, chainname, comment, rulenum);
318 #endif
320 static inline __pure struct ip6t_entry *
321 ip6t_next_entry(const struct ip6t_entry *entry)
323 return (void *)entry + entry->next_offset;
326 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
327 unsigned int
328 ip6t_do_table(struct sk_buff *skb,
329 unsigned int hook,
330 const struct net_device *in,
331 const struct net_device *out,
332 struct xt_table *table)
334 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
335 /* Initializing verdict to NF_DROP keeps gcc happy. */
336 unsigned int verdict = NF_DROP;
337 const char *indev, *outdev;
338 const void *table_base;
339 struct ip6t_entry *e, **jumpstack;
340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private;
342 struct xt_action_param acpar;
344 /* Initialization */
345 indev = in ? in->name : nulldevname;
346 outdev = out ? out->name : nulldevname;
347 /* We handle fragments by dealing with the first fragment as
348 * if it was a normal packet. All other fragments are treated
349 * normally, except that they will NEVER match rules that ask
350 * things we don't know, ie. tcp syn flag or ports). If the
351 * rule is also a fragment-specific rule, non-fragments won't
352 * match it. */
353 acpar.hotdrop = false;
354 acpar.in = in;
355 acpar.out = out;
356 acpar.family = NFPROTO_IPV6;
357 acpar.hooknum = hook;
359 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
361 xt_info_rdlock_bh();
362 private = table->private;
363 cpu = smp_processor_id();
364 table_base = private->entries[cpu];
365 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
366 stackptr = &private->stackptr[cpu];
367 origptr = *stackptr;
369 e = get_entry(table_base, private->hook_entry[hook]);
371 do {
372 const struct ip6t_entry_target *t;
373 const struct xt_entry_match *ematch;
375 IP_NF_ASSERT(e);
376 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
377 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
378 no_match:
379 e = ip6t_next_entry(e);
380 continue;
383 xt_ematch_foreach(ematch, e) {
384 acpar.match = ematch->u.kernel.match;
385 acpar.matchinfo = ematch->data;
386 if (!acpar.match->match(skb, &acpar))
387 goto no_match;
390 ADD_COUNTER(e->counters,
391 ntohs(ipv6_hdr(skb)->payload_len) +
392 sizeof(struct ipv6hdr), 1);
394 t = ip6t_get_target_c(e);
395 IP_NF_ASSERT(t->u.kernel.target);
397 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
398 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
399 /* The packet is traced: log it */
400 if (unlikely(skb->nf_trace))
401 trace_packet(skb, hook, in, out,
402 table->name, private, e);
403 #endif
404 /* Standard target? */
405 if (!t->u.kernel.target->target) {
406 int v;
408 v = ((struct ip6t_standard_target *)t)->verdict;
409 if (v < 0) {
410 /* Pop from stack? */
411 if (v != IP6T_RETURN) {
412 verdict = (unsigned)(-v) - 1;
413 break;
415 if (*stackptr == 0)
416 e = get_entry(table_base,
417 private->underflow[hook]);
418 else
419 e = ip6t_next_entry(jumpstack[--*stackptr]);
420 continue;
422 if (table_base + v != ip6t_next_entry(e) &&
423 !(e->ipv6.flags & IP6T_F_GOTO)) {
424 if (*stackptr >= private->stacksize) {
425 verdict = NF_DROP;
426 break;
428 jumpstack[(*stackptr)++] = e;
431 e = get_entry(table_base, v);
432 continue;
435 acpar.target = t->u.kernel.target;
436 acpar.targinfo = t->data;
438 verdict = t->u.kernel.target->target(skb, &acpar);
439 if (verdict == IP6T_CONTINUE)
440 e = ip6t_next_entry(e);
441 else
442 /* Verdict */
443 break;
444 } while (!acpar.hotdrop);
446 xt_info_rdunlock_bh();
447 *stackptr = origptr;
449 #ifdef DEBUG_ALLOW_ALL
450 return NF_ACCEPT;
451 #else
452 if (acpar.hotdrop)
453 return NF_DROP;
454 else return verdict;
455 #endif
458 /* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
460 static int
461 mark_source_chains(const struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0)
464 unsigned int hook;
466 /* No recursion; use packet counter to save back ptrs (reset
467 to 0 as we leave), and comefrom to save source hook bitmask */
468 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
469 unsigned int pos = newinfo->hook_entry[hook];
470 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
472 if (!(valid_hooks & (1 << hook)))
473 continue;
475 /* Set initial back pointer. */
476 e->counters.pcnt = pos;
478 for (;;) {
479 const struct ip6t_standard_target *t
480 = (void *)ip6t_get_target_c(e);
481 int visited = e->comefrom & (1 << hook);
483 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
484 pr_err("iptables: loop hook %u pos %u %08X.\n",
485 hook, pos, e->comefrom);
486 return 0;
488 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
490 /* Unconditional return/END. */
491 if ((e->target_offset == sizeof(struct ip6t_entry) &&
492 (strcmp(t->target.u.user.name,
493 IP6T_STANDARD_TARGET) == 0) &&
494 t->verdict < 0 &&
495 unconditional(&e->ipv6)) || visited) {
496 unsigned int oldpos, size;
498 if ((strcmp(t->target.u.user.name,
499 IP6T_STANDARD_TARGET) == 0) &&
500 t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
503 t->verdict);
504 return 0;
507 /* Return: backtrack through the last
508 big jump. */
509 do {
510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
512 if (e->comefrom
513 & (1 << NF_INET_NUMHOOKS)) {
514 duprintf("Back unset "
515 "on hook %u "
516 "rule %u\n",
517 hook, pos);
519 #endif
520 oldpos = pos;
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
525 if (pos == oldpos)
526 goto next;
528 e = (struct ip6t_entry *)
529 (entry0 + pos);
530 } while (oldpos == pos + e->next_offset);
532 /* Move along one */
533 size = e->next_offset;
534 e = (struct ip6t_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
537 pos += size;
538 } else {
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IP6T_STANDARD_TARGET) == 0 &&
543 newpos >= 0) {
544 if (newpos > newinfo->size -
545 sizeof(struct ip6t_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
548 newpos);
549 return 0;
551 /* This a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
553 pos, newpos);
554 } else {
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ip6t_entry *)
559 (entry0 + newpos);
560 e->counters.pcnt = pos;
561 pos = newpos;
564 next:
565 duprintf("Finished chain %u\n", hook);
567 return 1;
570 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
572 struct xt_mtdtor_param par;
574 par.net = net;
575 par.match = m->u.kernel.match;
576 par.matchinfo = m->data;
577 par.family = NFPROTO_IPV6;
578 if (par.match->destroy != NULL)
579 par.match->destroy(&par);
580 module_put(par.match->me);
583 static int
584 check_entry(const struct ip6t_entry *e, const char *name)
586 const struct ip6t_entry_target *t;
588 if (!ip6_checkentry(&e->ipv6)) {
589 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
590 return -EINVAL;
593 if (e->target_offset + sizeof(struct ip6t_entry_target) >
594 e->next_offset)
595 return -EINVAL;
597 t = ip6t_get_target_c(e);
598 if (e->target_offset + t->u.target_size > e->next_offset)
599 return -EINVAL;
601 return 0;
604 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
606 const struct ip6t_ip6 *ipv6 = par->entryinfo;
607 int ret;
609 par->match = m->u.kernel.match;
610 par->matchinfo = m->data;
612 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
613 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
614 if (ret < 0) {
615 duprintf("ip_tables: check failed for `%s'.\n",
616 par.match->name);
617 return ret;
619 return 0;
622 static int
623 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
625 struct xt_match *match;
626 int ret;
628 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
629 m->u.user.revision);
630 if (IS_ERR(match)) {
631 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
632 return PTR_ERR(match);
634 m->u.kernel.match = match;
636 ret = check_match(m, par);
637 if (ret)
638 goto err;
640 return 0;
641 err:
642 module_put(m->u.kernel.match->me);
643 return ret;
646 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
648 struct ip6t_entry_target *t = ip6t_get_target(e);
649 struct xt_tgchk_param par = {
650 .net = net,
651 .table = name,
652 .entryinfo = e,
653 .target = t->u.kernel.target,
654 .targinfo = t->data,
655 .hook_mask = e->comefrom,
656 .family = NFPROTO_IPV6,
658 int ret;
660 t = ip6t_get_target(e);
661 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
662 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
663 if (ret < 0) {
664 duprintf("ip_tables: check failed for `%s'.\n",
665 t->u.kernel.target->name);
666 return ret;
668 return 0;
671 static int
672 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
673 unsigned int size)
675 struct ip6t_entry_target *t;
676 struct xt_target *target;
677 int ret;
678 unsigned int j;
679 struct xt_mtchk_param mtpar;
680 struct xt_entry_match *ematch;
682 ret = check_entry(e, name);
683 if (ret)
684 return ret;
686 j = 0;
687 mtpar.net = net;
688 mtpar.table = name;
689 mtpar.entryinfo = &e->ipv6;
690 mtpar.hook_mask = e->comefrom;
691 mtpar.family = NFPROTO_IPV6;
692 xt_ematch_foreach(ematch, e) {
693 ret = find_check_match(ematch, &mtpar);
694 if (ret != 0)
695 goto cleanup_matches;
696 ++j;
699 t = ip6t_get_target(e);
700 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
701 t->u.user.revision);
702 if (IS_ERR(target)) {
703 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
704 ret = PTR_ERR(target);
705 goto cleanup_matches;
707 t->u.kernel.target = target;
709 ret = check_target(e, net, name);
710 if (ret)
711 goto err;
712 return 0;
713 err:
714 module_put(t->u.kernel.target->me);
715 cleanup_matches:
716 xt_ematch_foreach(ematch, e) {
717 if (j-- == 0)
718 break;
719 cleanup_match(ematch, net);
721 return ret;
724 static bool check_underflow(const struct ip6t_entry *e)
726 const struct ip6t_entry_target *t;
727 unsigned int verdict;
729 if (!unconditional(&e->ipv6))
730 return false;
731 t = ip6t_get_target_c(e);
732 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
733 return false;
734 verdict = ((struct ip6t_standard_target *)t)->verdict;
735 verdict = -verdict - 1;
736 return verdict == NF_DROP || verdict == NF_ACCEPT;
739 static int
740 check_entry_size_and_hooks(struct ip6t_entry *e,
741 struct xt_table_info *newinfo,
742 const unsigned char *base,
743 const unsigned char *limit,
744 const unsigned int *hook_entries,
745 const unsigned int *underflows,
746 unsigned int valid_hooks)
748 unsigned int h;
750 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
751 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
752 duprintf("Bad offset %p\n", e);
753 return -EINVAL;
756 if (e->next_offset
757 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
758 duprintf("checking: element %p size %u\n",
759 e, e->next_offset);
760 return -EINVAL;
763 /* Check hooks & underflows */
764 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
765 if (!(valid_hooks & (1 << h)))
766 continue;
767 if ((unsigned char *)e - base == hook_entries[h])
768 newinfo->hook_entry[h] = hook_entries[h];
769 if ((unsigned char *)e - base == underflows[h]) {
770 if (!check_underflow(e)) {
771 pr_err("Underflows must be unconditional and "
772 "use the STANDARD target with "
773 "ACCEPT/DROP\n");
774 return -EINVAL;
776 newinfo->underflow[h] = underflows[h];
780 /* Clear counters and comefrom */
781 e->counters = ((struct xt_counters) { 0, 0 });
782 e->comefrom = 0;
783 return 0;
786 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
788 struct xt_tgdtor_param par;
789 struct ip6t_entry_target *t;
790 struct xt_entry_match *ematch;
792 /* Cleanup all matches */
793 xt_ematch_foreach(ematch, e)
794 cleanup_match(ematch, net);
795 t = ip6t_get_target(e);
797 par.net = net;
798 par.target = t->u.kernel.target;
799 par.targinfo = t->data;
800 par.family = NFPROTO_IPV6;
801 if (par.target->destroy != NULL)
802 par.target->destroy(&par);
803 module_put(par.target->me);
806 /* Checks and translates the user-supplied table segment (held in
807 newinfo) */
808 static int
809 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
810 const struct ip6t_replace *repl)
812 struct ip6t_entry *iter;
813 unsigned int i;
814 int ret = 0;
816 newinfo->size = repl->size;
817 newinfo->number = repl->num_entries;
819 /* Init all hooks to impossible value. */
820 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
821 newinfo->hook_entry[i] = 0xFFFFFFFF;
822 newinfo->underflow[i] = 0xFFFFFFFF;
825 duprintf("translate_table: size %u\n", newinfo->size);
826 i = 0;
827 /* Walk through entries, checking offsets. */
828 xt_entry_foreach(iter, entry0, newinfo->size) {
829 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
830 entry0 + repl->size,
831 repl->hook_entry,
832 repl->underflow,
833 repl->valid_hooks);
834 if (ret != 0)
835 return ret;
836 ++i;
837 if (strcmp(ip6t_get_target(iter)->u.user.name,
838 XT_ERROR_TARGET) == 0)
839 ++newinfo->stacksize;
842 if (i != repl->num_entries) {
843 duprintf("translate_table: %u not %u entries\n",
844 i, repl->num_entries);
845 return -EINVAL;
848 /* Check hooks all assigned */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
850 /* Only hooks which are valid */
851 if (!(repl->valid_hooks & (1 << i)))
852 continue;
853 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
854 duprintf("Invalid hook entry %u %u\n",
855 i, repl->hook_entry[i]);
856 return -EINVAL;
858 if (newinfo->underflow[i] == 0xFFFFFFFF) {
859 duprintf("Invalid underflow %u %u\n",
860 i, repl->underflow[i]);
861 return -EINVAL;
865 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
866 return -ELOOP;
868 /* Finally, each sanity check must pass */
869 i = 0;
870 xt_entry_foreach(iter, entry0, newinfo->size) {
871 ret = find_check_entry(iter, net, repl->name, repl->size);
872 if (ret != 0)
873 break;
874 ++i;
877 if (ret != 0) {
878 xt_entry_foreach(iter, entry0, newinfo->size) {
879 if (i-- == 0)
880 break;
881 cleanup_entry(iter, net);
883 return ret;
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
892 return ret;
895 static void
896 get_counters(const struct xt_table_info *t,
897 struct xt_counters counters[])
899 struct ip6t_entry *iter;
900 unsigned int cpu;
901 unsigned int i;
902 unsigned int curcpu;
904 /* Instead of clearing (by a previous call to memset())
905 * the counters and using adds, we set the counters
906 * with data used by 'current' CPU
908 * Bottom half has to be disabled to prevent deadlock
909 * if new softirq were to run and call ipt_do_table
911 local_bh_disable();
912 curcpu = smp_processor_id();
914 i = 0;
915 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
916 SET_COUNTER(counters[i], iter->counters.bcnt,
917 iter->counters.pcnt);
918 ++i;
921 for_each_possible_cpu(cpu) {
922 if (cpu == curcpu)
923 continue;
924 i = 0;
925 xt_info_wrlock(cpu);
926 xt_entry_foreach(iter, t->entries[cpu], t->size) {
927 ADD_COUNTER(counters[i], iter->counters.bcnt,
928 iter->counters.pcnt);
929 ++i;
931 xt_info_wrunlock(cpu);
933 local_bh_enable();
936 static struct xt_counters *alloc_counters(const struct xt_table *table)
938 unsigned int countersize;
939 struct xt_counters *counters;
940 const struct xt_table_info *private = table->private;
942 /* We need atomic snapshot of counters: rest doesn't change
943 (other than comefrom, which userspace doesn't care
944 about). */
945 countersize = sizeof(struct xt_counters) * private->number;
946 counters = vmalloc(countersize);
948 if (counters == NULL)
949 return ERR_PTR(-ENOMEM);
951 get_counters(private, counters);
953 return counters;
956 static int
957 copy_entries_to_user(unsigned int total_size,
958 const struct xt_table *table,
959 void __user *userptr)
961 unsigned int off, num;
962 const struct ip6t_entry *e;
963 struct xt_counters *counters;
964 const struct xt_table_info *private = table->private;
965 int ret = 0;
966 const void *loc_cpu_entry;
968 counters = alloc_counters(table);
969 if (IS_ERR(counters))
970 return PTR_ERR(counters);
972 /* choose the copy that is on our node/cpu, ...
973 * This choice is lazy (because current thread is
974 * allowed to migrate to another cpu)
976 loc_cpu_entry = private->entries[raw_smp_processor_id()];
977 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
978 ret = -EFAULT;
979 goto free_counters;
982 /* FIXME: use iterator macros --RR */
983 /* ... then go back and fix counters and names */
984 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
985 unsigned int i;
986 const struct ip6t_entry_match *m;
987 const struct ip6t_entry_target *t;
989 e = (struct ip6t_entry *)(loc_cpu_entry + off);
990 if (copy_to_user(userptr + off
991 + offsetof(struct ip6t_entry, counters),
992 &counters[num],
993 sizeof(counters[num])) != 0) {
994 ret = -EFAULT;
995 goto free_counters;
998 for (i = sizeof(struct ip6t_entry);
999 i < e->target_offset;
1000 i += m->u.match_size) {
1001 m = (void *)e + i;
1003 if (copy_to_user(userptr + off + i
1004 + offsetof(struct ip6t_entry_match,
1005 u.user.name),
1006 m->u.kernel.match->name,
1007 strlen(m->u.kernel.match->name)+1)
1008 != 0) {
1009 ret = -EFAULT;
1010 goto free_counters;
1014 t = ip6t_get_target_c(e);
1015 if (copy_to_user(userptr + off + e->target_offset
1016 + offsetof(struct ip6t_entry_target,
1017 u.user.name),
1018 t->u.kernel.target->name,
1019 strlen(t->u.kernel.target->name)+1) != 0) {
1020 ret = -EFAULT;
1021 goto free_counters;
1025 free_counters:
1026 vfree(counters);
1027 return ret;
1030 #ifdef CONFIG_COMPAT
1031 static void compat_standard_from_user(void *dst, const void *src)
1033 int v = *(compat_int_t *)src;
1035 if (v > 0)
1036 v += xt_compat_calc_jump(AF_INET6, v);
1037 memcpy(dst, &v, sizeof(v));
1040 static int compat_standard_to_user(void __user *dst, const void *src)
1042 compat_int_t cv = *(int *)src;
1044 if (cv > 0)
1045 cv -= xt_compat_calc_jump(AF_INET6, cv);
1046 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1049 static int compat_calc_entry(const struct ip6t_entry *e,
1050 const struct xt_table_info *info,
1051 const void *base, struct xt_table_info *newinfo)
1053 const struct xt_entry_match *ematch;
1054 const struct ip6t_entry_target *t;
1055 unsigned int entry_offset;
1056 int off, i, ret;
1058 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1059 entry_offset = (void *)e - base;
1060 xt_ematch_foreach(ematch, e)
1061 off += xt_compat_match_offset(ematch->u.kernel.match);
1062 t = ip6t_get_target_c(e);
1063 off += xt_compat_target_offset(t->u.kernel.target);
1064 newinfo->size -= off;
1065 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1066 if (ret)
1067 return ret;
1069 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1070 if (info->hook_entry[i] &&
1071 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1072 newinfo->hook_entry[i] -= off;
1073 if (info->underflow[i] &&
1074 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1075 newinfo->underflow[i] -= off;
1077 return 0;
1080 static int compat_table_info(const struct xt_table_info *info,
1081 struct xt_table_info *newinfo)
1083 struct ip6t_entry *iter;
1084 void *loc_cpu_entry;
1085 int ret;
1087 if (!newinfo || !info)
1088 return -EINVAL;
1090 /* we dont care about newinfo->entries[] */
1091 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1092 newinfo->initial_entries = 0;
1093 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1094 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1095 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1096 if (ret != 0)
1097 return ret;
1099 return 0;
1101 #endif
1103 static int get_info(struct net *net, void __user *user,
1104 const int *len, int compat)
1106 char name[IP6T_TABLE_MAXNAMELEN];
1107 struct xt_table *t;
1108 int ret;
1110 if (*len != sizeof(struct ip6t_getinfo)) {
1111 duprintf("length %u != %zu\n", *len,
1112 sizeof(struct ip6t_getinfo));
1113 return -EINVAL;
1116 if (copy_from_user(name, user, sizeof(name)) != 0)
1117 return -EFAULT;
1119 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1120 #ifdef CONFIG_COMPAT
1121 if (compat)
1122 xt_compat_lock(AF_INET6);
1123 #endif
1124 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1125 "ip6table_%s", name);
1126 if (t && !IS_ERR(t)) {
1127 struct ip6t_getinfo info;
1128 const struct xt_table_info *private = t->private;
1129 #ifdef CONFIG_COMPAT
1130 struct xt_table_info tmp;
1132 if (compat) {
1133 ret = compat_table_info(private, &tmp);
1134 xt_compat_flush_offsets(AF_INET6);
1135 private = &tmp;
1137 #endif
1138 info.valid_hooks = t->valid_hooks;
1139 memcpy(info.hook_entry, private->hook_entry,
1140 sizeof(info.hook_entry));
1141 memcpy(info.underflow, private->underflow,
1142 sizeof(info.underflow));
1143 info.num_entries = private->number;
1144 info.size = private->size;
1145 strcpy(info.name, name);
1147 if (copy_to_user(user, &info, *len) != 0)
1148 ret = -EFAULT;
1149 else
1150 ret = 0;
1152 xt_table_unlock(t);
1153 module_put(t->me);
1154 } else
1155 ret = t ? PTR_ERR(t) : -ENOENT;
1156 #ifdef CONFIG_COMPAT
1157 if (compat)
1158 xt_compat_unlock(AF_INET6);
1159 #endif
1160 return ret;
1163 static int
1164 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1165 const int *len)
1167 int ret;
1168 struct ip6t_get_entries get;
1169 struct xt_table *t;
1171 if (*len < sizeof(get)) {
1172 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1173 return -EINVAL;
1175 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1176 return -EFAULT;
1177 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1178 duprintf("get_entries: %u != %zu\n",
1179 *len, sizeof(get) + get.size);
1180 return -EINVAL;
1183 t = xt_find_table_lock(net, AF_INET6, get.name);
1184 if (t && !IS_ERR(t)) {
1185 struct xt_table_info *private = t->private;
1186 duprintf("t->private->number = %u\n", private->number);
1187 if (get.size == private->size)
1188 ret = copy_entries_to_user(private->size,
1189 t, uptr->entrytable);
1190 else {
1191 duprintf("get_entries: I've got %u not %u!\n",
1192 private->size, get.size);
1193 ret = -EAGAIN;
1195 module_put(t->me);
1196 xt_table_unlock(t);
1197 } else
1198 ret = t ? PTR_ERR(t) : -ENOENT;
1200 return ret;
/*
 * Common backend for do_replace() and compat_do_replace(): atomically
 * swap the named table's ruleset for @newinfo, copy the old counters
 * out to @counters_ptr and free the old ruleset.  On failure the
 * caller still owns @newinfo and must free it.
 */
1203 static int
1204 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1205 struct xt_table_info *newinfo, unsigned int num_counters,
1206 void __user *counters_ptr)
1208 int ret;
1209 struct xt_table *t;
1210 struct xt_table_info *oldinfo;
1211 struct xt_counters *counters;
1212 const void *loc_cpu_old_entry;
1213 struct ip6t_entry *iter;
1215 ret = 0;
/* Buffer for returning the old ruleset's counters to userspace. */
1216 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1217 if (!counters) {
1218 ret = -ENOMEM;
1219 goto out;
/* Autoload "ip6table_<name>" if the table isn't registered yet. */
1222 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1223 "ip6table_%s", name);
1224 if (!t || IS_ERR(t)) {
1225 ret = t ? PTR_ERR(t) : -ENOENT;
1226 goto free_newinfo_counters_untrans;
1229 /* You lied! */
1230 if (valid_hooks != t->valid_hooks) {
1231 duprintf("Valid hook crap: %08X vs %08X\n",
1232 valid_hooks, t->valid_hooks);
1233 ret = -EINVAL;
1234 goto put_module;
1237 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1238 if (!oldinfo)
1239 goto put_module;
1241 /* Update module usage count based on number of rules */
1242 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1243 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1244 if ((oldinfo->number > oldinfo->initial_entries) ||
1245 (newinfo->number <= oldinfo->initial_entries))
1246 module_put(t->me);
1247 if ((oldinfo->number > oldinfo->initial_entries) &&
1248 (newinfo->number <= oldinfo->initial_entries))
1249 module_put(t->me);
1251 /* Get the old counters, and synchronize with replace */
1252 get_counters(oldinfo, counters);
1254 /* Decrease module usage counts and free resource */
1255 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1256 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1257 cleanup_entry(iter, net);
1259 xt_free_table_info(oldinfo);
/* The swap already happened; a failed copy-out doesn't undo it. */
1260 if (copy_to_user(counters_ptr, counters,
1261 sizeof(struct xt_counters) * num_counters) != 0)
1262 ret = -EFAULT;
1263 vfree(counters);
1264 xt_table_unlock(t);
1265 return ret;
1267 put_module:
1268 module_put(t->me);
1269 xt_table_unlock(t);
1270 free_newinfo_counters_untrans:
1271 vfree(counters);
1272 out:
1273 return ret;
/*
 * IP6T_SO_SET_REPLACE handler (native ABI): copy the replacement
 * ruleset from userspace, translate and verify it, then install it
 * via __do_replace().
 */
1276 static int
1277 do_replace(struct net *net, const void __user *user, unsigned int len)
1279 int ret;
1280 struct ip6t_replace tmp;
1281 struct xt_table_info *newinfo;
1282 void *loc_cpu_entry;
1283 struct ip6t_entry *iter;
1285 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1286 return -EFAULT;
1288 /* overflow check */
1289 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1290 return -ENOMEM;
1292 newinfo = xt_alloc_table_info(tmp.size);
1293 if (!newinfo)
1294 return -ENOMEM;
1296 /* choose the copy that is on our node/cpu */
1297 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
/* Entries follow the ip6t_replace header in the userspace blob. */
1298 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1299 tmp.size) != 0) {
1300 ret = -EFAULT;
1301 goto free_newinfo;
1304 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1305 if (ret != 0)
1306 goto free_newinfo;
1308 duprintf("ip_tables: Translated table\n");
1310 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1311 tmp.num_counters, tmp.counters);
1312 if (ret)
1313 goto free_newinfo_untrans;
1314 return 0;
/* Translation succeeded but replace failed: undo per-entry setup. */
1316 free_newinfo_untrans:
1317 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1318 cleanup_entry(iter, net);
1319 free_newinfo:
1320 xt_free_table_info(newinfo);
1321 return ret;
/*
 * IP6T_SO_SET_ADD_COUNTERS handler, shared by the native and 32-bit
 * compat paths (@compat selects which userspace header layout to read).
 * Adds the user-supplied byte/packet counters to each rule of the
 * named table, one counter pair per rule, in rule order.
 */
1324 static int
1325 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1326 int compat)
1328 unsigned int i, curcpu;
1329 struct xt_counters_info tmp;
1330 struct xt_counters *paddc;
1331 unsigned int num_counters;
1332 char *name;
1333 int size;
1334 void *ptmp;
1335 struct xt_table *t;
1336 const struct xt_table_info *private;
1337 int ret = 0;
1338 const void *loc_cpu_entry;
1339 struct ip6t_entry *iter;
1340 #ifdef CONFIG_COMPAT
1341 struct compat_xt_counters_info compat_tmp;
/* Pick the header struct matching the caller's ABI. */
1343 if (compat) {
1344 ptmp = &compat_tmp;
1345 size = sizeof(struct compat_xt_counters_info);
1346 } else
1347 #endif
1349 ptmp = &tmp;
1350 size = sizeof(struct xt_counters_info);
1353 if (copy_from_user(ptmp, user, size) != 0)
1354 return -EFAULT;
1356 #ifdef CONFIG_COMPAT
1357 if (compat) {
1358 num_counters = compat_tmp.num_counters;
1359 name = compat_tmp.name;
1360 } else
1361 #endif
1363 num_counters = tmp.num_counters;
1364 name = tmp.name;
/* Total length must exactly cover the header plus the counter array. */
1367 if (len != size + num_counters * sizeof(struct xt_counters))
1368 return -EINVAL;
1370 paddc = vmalloc(len - size);
1371 if (!paddc)
1372 return -ENOMEM;
1374 if (copy_from_user(paddc, user + size, len - size) != 0) {
1375 ret = -EFAULT;
1376 goto free;
1379 t = xt_find_table_lock(net, AF_INET6, name);
1380 if (!t || IS_ERR(t)) {
1381 ret = t ? PTR_ERR(t) : -ENOENT;
1382 goto free;
1386 local_bh_disable();
1387 private = t->private;
1388 if (private->number != num_counters) {
1389 ret = -EINVAL;
1390 goto unlock_up_free;
1393 i = 0;
1394 /* Choose the copy that is on our node */
1395 curcpu = smp_processor_id();
/* Update under the per-cpu write lock with bottom halves disabled. */
1396 xt_info_wrlock(curcpu);
1397 loc_cpu_entry = private->entries[curcpu];
1398 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1399 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1400 ++i;
1402 xt_info_wrunlock(curcpu);
1404 unlock_up_free:
1405 local_bh_enable();
1406 xt_table_unlock(t);
1407 module_put(t->me);
1408 free:
1409 vfree(paddc);
1411 return ret;
1414 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ip6t_replace: same fields, but the
 * counters pointer is a compat_uptr_t and the trailing entries use the
 * compact compat_ip6t_entry layout.
 */
1415 struct compat_ip6t_replace {
1416 char name[IP6T_TABLE_MAXNAMELEN];
1417 u32 valid_hooks;
1418 u32 num_entries;
1419 u32 size;
1420 u32 hook_entry[NF_INET_NUMHOOKS];
1421 u32 underflow[NF_INET_NUMHOOKS];
1422 u32 num_counters;
1423 compat_uptr_t counters; /* struct ip6t_counters * */
1424 struct compat_ip6t_entry entries[0];
/*
 * Convert one kernel ip6t_entry to its 32-bit compat form at *dstptr,
 * substituting counters[i] for the entry's in-kernel counters.
 * Advances *dstptr and shrinks *size as the entry, its matches and its
 * target are narrowed, then rewrites target_offset/next_offset to
 * match the smaller compat layout.
 */
1427 static int
1428 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1429 unsigned int *size, struct xt_counters *counters,
1430 unsigned int i)
1432 struct ip6t_entry_target *t;
1433 struct compat_ip6t_entry __user *ce;
1434 u_int16_t target_offset, next_offset;
1435 compat_uint_t origsize;
1436 const struct xt_entry_match *ematch;
1437 int ret = 0;
1439 origsize = *size;
1440 ce = (struct compat_ip6t_entry __user *)*dstptr;
1441 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1442 copy_to_user(&ce->counters, &counters[i],
1443 sizeof(counters[i])) != 0)
1444 return -EFAULT;
1446 *dstptr += sizeof(struct compat_ip6t_entry);
1447 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match/target conversion may shrink the entry further. */
1449 xt_ematch_foreach(ematch, e) {
1450 ret = xt_compat_match_to_user(ematch, dstptr, size);
1451 if (ret != 0)
1452 return ret;
/* (origsize - *size) is the total shrinkage accumulated so far. */
1454 target_offset = e->target_offset - (origsize - *size);
1455 t = ip6t_get_target(e);
1456 ret = xt_compat_target_to_user(t, dstptr, size);
1457 if (ret)
1458 return ret;
1459 next_offset = e->next_offset - (origsize - *size);
1460 if (put_user(target_offset, &ce->target_offset) != 0 ||
1461 put_user(next_offset, &ce->next_offset) != 0)
1462 return -EFAULT;
1463 return 0;
/*
 * Look up (and pin) the match extension named in @m, store it in
 * m->u.kernel.match and add its compat size delta to *size.
 * NOTE(review): @name, @ipv6 and @hookmask are currently unused here;
 * they mirror the native-path helper's signature.
 */
1466 static int
1467 compat_find_calc_match(struct ip6t_entry_match *m,
1468 const char *name,
1469 const struct ip6t_ip6 *ipv6,
1470 unsigned int hookmask,
1471 int *size)
1473 struct xt_match *match;
/* Takes a module reference on success; dropped via compat_release_entry(). */
1475 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1476 m->u.user.revision);
1477 if (IS_ERR(match)) {
1478 duprintf("compat_check_calc_match: `%s' not found\n",
1479 m->u.user.name);
1480 return PTR_ERR(match);
1482 m->u.kernel.match = match;
1483 *size += xt_compat_match_offset(match);
1484 return 0;
/*
 * Drop the module references taken while sizing a compat entry
 * (its matches and target); used on error-unwind paths before the
 * per-entry ->checkentry hooks have run.
 */
1487 static void compat_release_entry(struct compat_ip6t_entry *e)
1489 struct ip6t_entry_target *t;
1490 struct xt_entry_match *ematch;
1492 /* Cleanup all matches */
1493 xt_ematch_foreach(ematch, e)
1494 module_put(ematch->u.kernel.match->me);
1495 t = compat_ip6t_get_target(e);
1496 module_put(t->u.kernel.target->me);
/*
 * First pass over one compat entry: validate its alignment, bounds and
 * minimum size, resolve (and pin) its matches and target, record the
 * size delta between compat and native layouts via
 * xt_compat_add_offset(), and note hook entry/underflow positions.
 * On failure all module references taken so far are dropped.
 */
1499 static int
1500 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1501 struct xt_table_info *newinfo,
1502 unsigned int *size,
1503 const unsigned char *base,
1504 const unsigned char *limit,
1505 const unsigned int *hook_entries,
1506 const unsigned int *underflows,
1507 const char *name)
1509 struct xt_entry_match *ematch;
1510 struct ip6t_entry_target *t;
1511 struct xt_target *target;
1512 unsigned int entry_offset;
1513 unsigned int j;
1514 int ret, off, h;
1516 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1517 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1518 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1519 duprintf("Bad offset %p, limit = %p\n", e, limit);
1520 return -EINVAL;
/* An entry must at least hold its own header plus a target header. */
1523 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1524 sizeof(struct compat_xt_entry_target)) {
1525 duprintf("checking: element %p size %u\n",
1526 e, e->next_offset);
1527 return -EINVAL;
1530 /* For purposes of check_entry casting the compat entry is fine */
1531 ret = check_entry((struct ip6t_entry *)e, name);
1532 if (ret)
1533 return ret;
/* off accumulates how much larger the native layout will be. */
1535 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1536 entry_offset = (void *)e - (void *)base;
/* j counts matches successfully pinned, for partial unwind. */
1537 j = 0;
1538 xt_ematch_foreach(ematch, e) {
1539 ret = compat_find_calc_match(ematch, name,
1540 &e->ipv6, e->comefrom, &off);
1541 if (ret != 0)
1542 goto release_matches;
1543 ++j;
1546 t = compat_ip6t_get_target(e);
1547 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1548 t->u.user.revision);
1549 if (IS_ERR(target)) {
1550 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1551 t->u.user.name);
1552 ret = PTR_ERR(target);
1553 goto release_matches;
1555 t->u.kernel.target = target;
1557 off += xt_compat_target_offset(target);
1558 *size += off;
1559 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1560 if (ret)
1561 goto out;
1563 /* Check hooks & underflows */
1564 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1565 if ((unsigned char *)e - base == hook_entries[h])
1566 newinfo->hook_entry[h] = hook_entries[h];
1567 if ((unsigned char *)e - base == underflows[h])
1568 newinfo->underflow[h] = underflows[h];
1571 /* Clear counters and comefrom */
1572 memset(&e->counters, 0, sizeof(e->counters));
1573 e->comefrom = 0;
1574 return 0;
1576 out:
1577 module_put(t->u.kernel.target->me);
1578 release_matches:
/* Release only the first j matches; the rest were never pinned. */
1579 xt_ematch_foreach(ematch, e) {
1580 if (j-- == 0)
1581 break;
1582 module_put(ematch->u.kernel.match->me);
1584 return ret;
/*
 * Second pass: expand one validated compat entry into its native
 * ip6t_entry layout at *dstptr.  Grows *size by the compat/native
 * delta, rewrites target_offset/next_offset, and shifts any hook
 * entry/underflow positions that lie past this entry.  No user
 * access here: the compat blob was already copied into the kernel.
 */
1587 static int
1588 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1589 unsigned int *size, const char *name,
1590 struct xt_table_info *newinfo, unsigned char *base)
1592 struct ip6t_entry_target *t;
1593 struct xt_target *target;
1594 struct ip6t_entry *de;
1595 unsigned int origsize;
1596 int ret, h;
1597 struct xt_entry_match *ematch;
1599 ret = 0;
1600 origsize = *size;
1601 de = (struct ip6t_entry *)*dstptr;
1602 memcpy(de, e, sizeof(struct ip6t_entry));
1603 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1605 *dstptr += sizeof(struct ip6t_entry);
/* Note: *size grows here — the native layout is larger than compat. */
1606 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1608 xt_ematch_foreach(ematch, e) {
1609 ret = xt_compat_match_from_user(ematch, dstptr, size);
1610 if (ret != 0)
1611 return ret;
1613 de->target_offset = e->target_offset - (origsize - *size);
1614 t = compat_ip6t_get_target(e);
1615 target = t->u.kernel.target;
1616 xt_compat_target_from_user(t, dstptr, size);
1618 de->next_offset = e->next_offset - (origsize - *size);
/* Hook offsets after this entry shift by the accumulated growth. */
1619 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1620 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1621 newinfo->hook_entry[h] -= origsize - *size;
1622 if ((unsigned char *)de - base < newinfo->underflow[h])
1623 newinfo->underflow[h] -= origsize - *size;
1625 return ret;
/*
 * Third pass: run the ->checkentry hooks for one translated entry's
 * matches and target.  On failure, undo the checks that already
 * succeeded (the first j matches) via cleanup_match().
 */
1628 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1629 const char *name)
1631 unsigned int j;
1632 int ret = 0;
1633 struct xt_mtchk_param mtpar;
1634 struct xt_entry_match *ematch;
1636 j = 0;
1637 mtpar.net = net;
1638 mtpar.table = name;
1639 mtpar.entryinfo = &e->ipv6;
1640 mtpar.hook_mask = e->comefrom;
1641 mtpar.family = NFPROTO_IPV6;
1642 xt_ematch_foreach(ematch, e) {
1643 ret = check_match(ematch, &mtpar);
1644 if (ret != 0)
1645 goto cleanup_matches;
1646 ++j;
1649 ret = check_target(e, net, name);
1650 if (ret)
1651 goto cleanup_matches;
1652 return 0;
1654 cleanup_matches:
/* Only the first j matches passed check_match(); clean up just those. */
1655 xt_ematch_foreach(ematch, e) {
1656 if (j-- == 0)
1657 break;
1658 cleanup_match(ematch, net);
1660 return ret;
/*
 * Translate a 32-bit compat ruleset into the native layout.
 * Pass 1 validates each compat entry and records size deltas; a new
 * xt_table_info is then allocated and pass 2 expands every entry into
 * it; finally chains are loop-checked and each entry's extensions are
 * ->checkentry'd.  On success *pinfo/*pentry0 are replaced with the
 * translated table and the old info is freed; on failure all module
 * references and allocations are unwound.
 */
1663 static int
1664 translate_compat_table(struct net *net,
1665 const char *name,
1666 unsigned int valid_hooks,
1667 struct xt_table_info **pinfo,
1668 void **pentry0,
1669 unsigned int total_size,
1670 unsigned int number,
1671 unsigned int *hook_entries,
1672 unsigned int *underflows)
1674 unsigned int i, j;
1675 struct xt_table_info *newinfo, *info;
1676 void *pos, *entry0, *entry1;
1677 struct compat_ip6t_entry *iter0;
1678 struct ip6t_entry *iter1;
1679 unsigned int size;
1680 int ret = 0;
1682 info = *pinfo;
1683 entry0 = *pentry0;
1684 size = total_size;
1685 info->number = number;
1687 /* Init all hooks to impossible value. */
1688 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1689 info->hook_entry[i] = 0xFFFFFFFF;
1690 info->underflow[i] = 0xFFFFFFFF;
1693 duprintf("translate_compat_table: size %u\n", info->size);
/* j counts entries that passed pass 1, for error unwind. */
1694 j = 0;
1695 xt_compat_lock(AF_INET6);
1696 /* Walk through entries, checking offsets. */
1697 xt_entry_foreach(iter0, entry0, total_size) {
1698 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1699 entry0,
1700 entry0 + total_size,
1701 hook_entries,
1702 underflows,
1703 name);
1704 if (ret != 0)
1705 goto out_unlock;
1706 ++j;
1709 ret = -EINVAL;
1710 if (j != number) {
1711 duprintf("translate_compat_table: %u not %u entries\n",
1712 j, number);
1713 goto out_unlock;
1716 /* Check hooks all assigned */
1717 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1718 /* Only hooks which are valid */
1719 if (!(valid_hooks & (1 << i)))
1720 continue;
1721 if (info->hook_entry[i] == 0xFFFFFFFF) {
1722 duprintf("Invalid hook entry %u %u\n",
1723 i, hook_entries[i]);
1724 goto out_unlock;
1726 if (info->underflow[i] == 0xFFFFFFFF) {
1727 duprintf("Invalid underflow %u %u\n",
1728 i, underflows[i]);
1729 goto out_unlock;
1733 ret = -ENOMEM;
/* size now includes the native-vs-compat growth from pass 1. */
1734 newinfo = xt_alloc_table_info(size);
1735 if (!newinfo)
1736 goto out_unlock;
1738 newinfo->number = number;
1739 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1740 newinfo->hook_entry[i] = info->hook_entry[i];
1741 newinfo->underflow[i] = info->underflow[i];
1743 entry1 = newinfo->entries[raw_smp_processor_id()];
1744 pos = entry1;
1745 size = total_size;
1746 xt_entry_foreach(iter0, entry0, total_size) {
1747 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1748 name, newinfo, entry1);
1749 if (ret != 0)
1750 break;
1752 xt_compat_flush_offsets(AF_INET6);
1753 xt_compat_unlock(AF_INET6);
1754 if (ret)
1755 goto free_newinfo;
1757 ret = -ELOOP;
1758 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1759 goto free_newinfo;
1761 i = 0;
1762 xt_entry_foreach(iter1, entry1, newinfo->size) {
1763 ret = compat_check_entry(iter1, net, name);
1764 if (ret != 0)
1765 break;
1766 ++i;
1768 if (ret) {
1770 * The first i matches need cleanup_entry (calls ->destroy)
1771 * because they had called ->check already. The other j-i
1772 * entries need only release.
1774 int skip = i;
1775 j -= i;
1776 xt_entry_foreach(iter0, entry0, newinfo->size) {
1777 if (skip-- > 0)
1778 continue;
1779 if (j-- == 0)
1780 break;
1781 compat_release_entry(iter0);
1783 xt_entry_foreach(iter1, entry1, newinfo->size) {
1784 if (i-- == 0)
1785 break;
1786 cleanup_entry(iter1, net);
1788 xt_free_table_info(newinfo);
1789 return ret;
1792 /* And one copy for every other CPU */
1793 for_each_possible_cpu(i)
1794 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1795 memcpy(newinfo->entries[i], entry1, newinfo->size);
1797 *pinfo = newinfo;
1798 *pentry0 = entry1;
1799 xt_free_table_info(info);
1800 return 0;
1802 free_newinfo:
1803 xt_free_table_info(newinfo);
1804 out:
/* Release module refs for the j compat entries that were pinned. */
1805 xt_entry_foreach(iter0, entry0, total_size) {
1806 if (j-- == 0)
1807 break;
1808 compat_release_entry(iter0);
1810 return ret;
1811 out_unlock:
1812 xt_compat_flush_offsets(AF_INET6);
1813 xt_compat_unlock(AF_INET6);
1814 goto out;
/*
 * 32-bit compat IP6T_SO_SET_REPLACE handler: copy the compat ruleset
 * from userspace, translate it into the native layout, then install
 * it via the shared __do_replace().
 */
1817 static int
1818 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1820 int ret;
1821 struct compat_ip6t_replace tmp;
1822 struct xt_table_info *newinfo;
1823 void *loc_cpu_entry;
1824 struct ip6t_entry *iter;
1826 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1827 return -EFAULT;
1829 /* overflow check */
1830 if (tmp.size >= INT_MAX / num_possible_cpus())
1831 return -ENOMEM;
1832 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1833 return -ENOMEM;
1835 newinfo = xt_alloc_table_info(tmp.size);
1836 if (!newinfo)
1837 return -ENOMEM;
1839 /* choose the copy that is on our node/cpu */
1840 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1841 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1842 tmp.size) != 0) {
1843 ret = -EFAULT;
1844 goto free_newinfo;
/* On success this swaps newinfo/loc_cpu_entry for translated copies. */
1847 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1848 &newinfo, &loc_cpu_entry, tmp.size,
1849 tmp.num_entries, tmp.hook_entry,
1850 tmp.underflow);
1851 if (ret != 0)
1852 goto free_newinfo;
1854 duprintf("compat_do_replace: Translated table\n");
1856 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1857 tmp.num_counters, compat_ptr(tmp.counters));
1858 if (ret)
1859 goto free_newinfo_untrans;
1860 return 0;
1862 free_newinfo_untrans:
1863 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1864 cleanup_entry(iter, net);
1865 free_newinfo:
1866 xt_free_table_info(newinfo);
1867 return ret;
/*
 * Compat setsockopt entry point: dispatch REPLACE and ADD_COUNTERS
 * to their compat-aware handlers.  Requires CAP_NET_ADMIN.
 */
1870 static int
1871 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1872 unsigned int len)
1874 int ret;
1876 if (!capable(CAP_NET_ADMIN))
1877 return -EPERM;
1879 switch (cmd) {
1880 case IP6T_SO_SET_REPLACE:
1881 ret = compat_do_replace(sock_net(sk), user, len);
1882 break;
1884 case IP6T_SO_SET_ADD_COUNTERS:
/* compat=1: counters header arrives in the 32-bit layout. */
1885 ret = do_add_counters(sock_net(sk), user, len, 1);
1886 break;
1888 default:
1889 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1890 ret = -EINVAL;
1893 return ret;
/* 32-bit userspace layout of struct ip6t_get_entries. */
1896 struct compat_ip6t_get_entries {
1897 char name[IP6T_TABLE_MAXNAMELEN];
1898 compat_uint_t size;
1899 struct compat_ip6t_entry entrytable[0];
/*
 * Copy the whole ruleset to 32-bit userspace, converting each entry
 * with compat_copy_entry_to_user() and merging in a fresh counter
 * snapshot.  Stops at the first failing entry.
 */
1903 static int
1904 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1905 void __user *userptr)
1906 struct xt_counters *counters;
1907 const struct xt_table_info *private = table->private;
1908 void __user *pos;
1909 unsigned int size;
1910 int ret = 0;
1911 const void *loc_cpu_entry;
1912 unsigned int i = 0;
1913 struct ip6t_entry *iter;
1915 counters = alloc_counters(table);
1916 if (IS_ERR(counters))
1917 return PTR_ERR(counters);
1919 /* choose the copy that is on our node/cpu, ...
1920 * This choice is lazy (because current thread is
1921 * allowed to migrate to another cpu)
1923 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1924 pos = userptr;
1925 size = total_size;
1926 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1927 ret = compat_copy_entry_to_user(iter, &pos,
1928 &size, counters, i++);
1929 if (ret != 0)
1930 break;
1933 vfree(counters);
1934 return ret;
/*
 * Compat IP6T_SO_GET_ENTRIES handler: validate the request length
 * against the compat-sized table, then stream the converted entries
 * to userspace.  Runs under xt_compat_lock so the compat offset table
 * built by compat_table_info() stays consistent.
 */
1937 static int
1938 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1939 int *len)
1941 int ret;
1942 struct compat_ip6t_get_entries get;
1943 struct xt_table *t;
1945 if (*len < sizeof(get)) {
1946 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1947 return -EINVAL;
1950 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1951 return -EFAULT;
1953 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1954 duprintf("compat_get_entries: %u != %zu\n",
1955 *len, sizeof(get) + get.size);
1956 return -EINVAL;
1959 xt_compat_lock(AF_INET6);
1960 t = xt_find_table_lock(net, AF_INET6, get.name);
1961 if (t && !IS_ERR(t)) {
1962 const struct xt_table_info *private = t->private;
1963 struct xt_table_info info;
1964 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the compat (shrunken) total size. */
1965 ret = compat_table_info(private, &info);
1966 if (!ret && get.size == info.size) {
1967 ret = compat_copy_entries_to_user(private->size,
1968 t, uptr->entrytable);
1969 } else if (!ret) {
1970 duprintf("compat_get_entries: I've got %u not %u!\n",
1971 private->size, get.size);
1972 ret = -EAGAIN;
1974 xt_compat_flush_offsets(AF_INET6);
1975 module_put(t->me);
1976 xt_table_unlock(t);
1977 } else
1978 ret = t ? PTR_ERR(t) : -ENOENT;
1980 xt_compat_unlock(AF_INET6);
1981 return ret;
/* Forward declaration: the native handler serves as the compat fallback. */
1984 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt entry point: GET_INFO and GET_ENTRIES get
 * compat-aware handling; everything else falls through to the native
 * do_ip6t_get_ctl().  Requires CAP_NET_ADMIN.
 */
1986 static int
1987 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1989 int ret;
1991 if (!capable(CAP_NET_ADMIN))
1992 return -EPERM;
1994 switch (cmd) {
1995 case IP6T_SO_GET_INFO:
1996 ret = get_info(sock_net(sk), user, len, 1);
1997 break;
1998 case IP6T_SO_GET_ENTRIES:
1999 ret = compat_get_entries(sock_net(sk), user, len);
2000 break;
2001 default:
2002 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2004 return ret;
2006 #endif
/*
 * Native setsockopt entry point: dispatch REPLACE and ADD_COUNTERS.
 * Requires CAP_NET_ADMIN.
 */
2008 static int
2009 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2011 int ret;
2013 if (!capable(CAP_NET_ADMIN))
2014 return -EPERM;
2016 switch (cmd) {
2017 case IP6T_SO_SET_REPLACE:
2018 ret = do_replace(sock_net(sk), user, len);
2019 break;
2021 case IP6T_SO_SET_ADD_COUNTERS:
/* compat=0: counters header uses the native layout. */
2022 ret = do_add_counters(sock_net(sk), user, len, 0);
2023 break;
2025 default:
2026 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2027 ret = -EINVAL;
2030 return ret;
/*
 * Native getsockopt entry point: GET_INFO, GET_ENTRIES and the two
 * revision queries.  Requires CAP_NET_ADMIN.
 */
2033 static int
2034 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2036 int ret;
2038 if (!capable(CAP_NET_ADMIN))
2039 return -EPERM;
2041 switch (cmd) {
2042 case IP6T_SO_GET_INFO:
2043 ret = get_info(sock_net(sk), user, len, 0);
2044 break;
2046 case IP6T_SO_GET_ENTRIES:
2047 ret = get_entries(sock_net(sk), user, len);
2048 break;
2050 case IP6T_SO_GET_REVISION_MATCH:
2051 case IP6T_SO_GET_REVISION_TARGET: {
2052 struct ip6t_get_revision rev;
2053 int target;
2055 if (*len != sizeof(rev)) {
2056 ret = -EINVAL;
2057 break;
2059 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2060 ret = -EFAULT;
2061 break;
/* target=1 queries a target revision, 0 a match revision. */
2064 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2065 target = 1;
2066 else
2067 target = 0;
/* Autoload "ip6t_<name>" if the extension isn't loaded yet. */
2069 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2070 rev.revision,
2071 target, &ret),
2072 "ip6t_%s", rev.name);
2073 break;
2076 default:
2077 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2078 ret = -EINVAL;
2081 return ret;
/*
 * Register an ip6_tables table for @net, seeding it with the initial
 * ruleset in @repl.  Returns the registered xt_table on success or an
 * ERR_PTR() on failure.
 */
2084 struct xt_table *ip6t_register_table(struct net *net,
2085 const struct xt_table *table,
2086 const struct ip6t_replace *repl)
2088 int ret;
2089 struct xt_table_info *newinfo;
2090 struct xt_table_info bootstrap = {0};
2091 void *loc_cpu_entry;
2092 struct xt_table *new_table;
2094 newinfo = xt_alloc_table_info(repl->size);
2095 if (!newinfo) {
2096 ret = -ENOMEM;
2097 goto out;
2100 /* choose the copy on our node/cpu, but dont care about preemption */
2101 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2102 memcpy(loc_cpu_entry, repl->entries, repl->size);
2104 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2105 if (ret != 0)
2106 goto out_free;
/* bootstrap is a placeholder; xt_register_table swaps in newinfo. */
2108 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2109 if (IS_ERR(new_table)) {
2110 ret = PTR_ERR(new_table);
2111 goto out_free;
2113 return new_table;
2115 out_free:
2116 xt_free_table_info(newinfo);
2117 out:
2118 return ERR_PTR(ret);
/*
 * Unregister @table and release its ruleset: run cleanup_entry() on
 * every rule, drop the extra module reference held for user-added
 * rules, and free the table info.
 */
2121 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2123 struct xt_table_info *private;
2124 void *loc_cpu_entry;
/* table->me is saved before the table memory is torn down. */
2125 struct module *table_owner = table->me;
2126 struct ip6t_entry *iter;
2128 private = xt_unregister_table(table);
2130 /* Decrease module usage counts and free resources */
2131 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2132 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2133 cleanup_entry(iter, net);
2134 if (private->number > private->initial_entries)
2135 module_put(table_owner);
2136 xt_free_table_info(private);
/*
 * Match helper for the icmp6 extension: true when the packet's
 * type equals the rule's type and its code lies in the inclusive
 * [min_code, max_code] range, XOR-ed with the rule's invert flag.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	return invert ? !hit : hit;
}
/*
 * xt_match ->match hook for the icmp6 extension: pull the ICMPv6
 * header at the transport offset and compare type/code against the
 * rule.  Fragments never match; a truncated header hot-drops the
 * packet.
 */
2149 static bool
2150 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2152 const struct icmp6hdr *ic;
2153 struct icmp6hdr _icmph;
2154 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2156 /* Must not be a fragment. */
2157 if (par->fragoff != 0)
2158 return false;
2160 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2161 if (ic == NULL) {
2162 /* We've been asked to examine this packet, and we
2163 * can't. Hence, no choice but to drop.
2165 duprintf("Dropping evil ICMP tinygram.\n");
2166 par->hotdrop = true;
2167 return false;
2170 return icmp6_type_code_match(icmpinfo->type,
2171 icmpinfo->code[0],
2172 icmpinfo->code[1],
2173 ic->icmp6_type, ic->icmp6_code,
2174 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2177 /* Called when user tries to insert an entry of this type. */
2178 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2180 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2182 /* Must specify no unknown invflags */
2183 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2186 /* The built-in targets: standard (NULL) and error. */
2187 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2189 .name = IP6T_STANDARD_TARGET,
2190 .targetsize = sizeof(int),
2191 .family = NFPROTO_IPV6,
2192 #ifdef CONFIG_COMPAT
/* Standard target's int verdict needs 32/64-bit translation. */
2193 .compatsize = sizeof(compat_int_t),
2194 .compat_from_user = compat_standard_from_user,
2195 .compat_to_user = compat_standard_to_user,
2196 #endif
2199 .name = IP6T_ERROR_TARGET,
2200 .target = ip6t_error,
2201 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2202 .family = NFPROTO_IPV6,
/* get/setsockopt registration for the IP6T_SO_* option range. */
2206 static struct nf_sockopt_ops ip6t_sockopts = {
2207 .pf = PF_INET6,
2208 .set_optmin = IP6T_BASE_CTL,
2209 .set_optmax = IP6T_SO_SET_MAX+1,
2210 .set = do_ip6t_set_ctl,
2211 #ifdef CONFIG_COMPAT
2212 .compat_set = compat_do_ip6t_set_ctl,
2213 #endif
2214 .get_optmin = IP6T_BASE_CTL,
2215 .get_optmax = IP6T_SO_GET_MAX+1,
2216 .get = do_ip6t_get_ctl,
2217 #ifdef CONFIG_COMPAT
2218 .compat_get = compat_do_ip6t_get_ctl,
2219 #endif
2220 .owner = THIS_MODULE,
/* Built-in match: the icmp6 type/code match defined above. */
2223 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2225 .name = "icmp6",
2226 .match = icmp6_match,
2227 .matchsize = sizeof(struct ip6t_icmp),
2228 .checkentry = icmp6_checkentry,
2229 .proto = IPPROTO_ICMPV6,
2230 .family = NFPROTO_IPV6,
/* Per-netns init: set up xt proto state for IPv6. */
2234 static int __net_init ip6_tables_net_init(struct net *net)
2236 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: release xt proto state for IPv6. */
2239 static void __net_exit ip6_tables_net_exit(struct net *net)
2241 xt_proto_fini(net, NFPROTO_IPV6);
/* Per-network-namespace lifecycle hooks. */
2244 static struct pernet_operations ip6_tables_net_ops = {
2245 .init = ip6_tables_net_init,
2246 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, built-in targets and matches,
 * then the sockopt interface.  Each failure unwinds the earlier
 * registrations in reverse order.
 */
2249 static int __init ip6_tables_init(void)
2251 int ret;
2253 ret = register_pernet_subsys(&ip6_tables_net_ops);
2254 if (ret < 0)
2255 goto err1;
2257 /* Noone else will be downing sem now, so we won't sleep */
2258 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2259 if (ret < 0)
2260 goto err2;
2261 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2262 if (ret < 0)
2263 goto err4;
2265 /* Register setsockopt */
2266 ret = nf_register_sockopt(&ip6t_sockopts);
2267 if (ret < 0)
2268 goto err5;
2270 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2271 return 0;
2273 err5:
2274 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2275 err4:
2276 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2277 err2:
2278 unregister_pernet_subsys(&ip6_tables_net_ops);
2279 err1:
2280 return ret;
/* Module exit: undo ip6_tables_init() registrations in reverse order. */
2283 static void __exit ip6_tables_fini(void)
2285 nf_unregister_sockopt(&ip6t_sockopts);
2287 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2288 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2289 unregister_pernet_subsys(&ip6_tables_net_ops);
2293 * find the offset to specified header or the protocol number of last header
2294 * if target < 0. "last header" is transport protocol header, ESP, or
2295 * "No next header".
2297 * If target header is found, its offset is set in *offset and return protocol
2298 * number. Otherwise, return -1.
2300 * If the first fragment doesn't contain the final protocol header or
2301 * NEXTHDR_NONE it is considered invalid.
2303 * Note that a non-first fragment is a special case: "the protocol number
2304 * of the last header" is the "next header" field of the Fragment header.
2305 * In this case, *offset is meaningless and the fragment offset is stored
2306 * in *fragoff if fragoff isn't NULL.
/* See the header-finding contract documented in the comment above. */
2309 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2310 int target, unsigned short *fragoff)
2312 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2313 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2314 unsigned int len = skb->len - start;
2316 if (fragoff)
2317 *fragoff = 0;
/* Walk the extension header chain until @target (or the last header). */
2319 while (nexthdr != target) {
2320 struct ipv6_opt_hdr _hdr, *hp;
2321 unsigned int hdrlen;
2323 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
/* Negative target means "report the last header's protocol". */
2324 if (target < 0)
2325 break;
2326 return -ENOENT;
2329 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2330 if (hp == NULL)
2331 return -EBADMSG;
2332 if (nexthdr == NEXTHDR_FRAGMENT) {
2333 unsigned short _frag_off;
2334 __be16 *fp;
2335 fp = skb_header_pointer(skb,
2336 start+offsetof(struct frag_hdr,
2337 frag_off),
2338 sizeof(_frag_off),
2339 &_frag_off);
2340 if (fp == NULL)
2341 return -EBADMSG;
/* Mask off the reserved/M bits; nonzero means non-first fragment. */
2343 _frag_off = ntohs(*fp) & ~0x7;
2344 if (_frag_off) {
2345 if (target < 0 &&
2346 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2347 hp->nexthdr == NEXTHDR_NONE)) {
2348 if (fragoff)
2349 *fragoff = _frag_off;
2350 return hp->nexthdr;
2352 return -ENOENT;
/* Fragment header is fixed 8 bytes; AUTH uses 4-byte units. */
2354 hdrlen = 8;
2355 } else if (nexthdr == NEXTHDR_AUTH)
2356 hdrlen = (hp->hdrlen + 2) << 2;
2357 else
2358 hdrlen = ipv6_optlen(hp);
2360 nexthdr = hp->nexthdr;
2361 len -= hdrlen;
2362 start += hdrlen;
2365 *offset = start;
2366 return nexthdr;
/* Symbols exported for table modules (filter/mangle/raw/security). */
2369 EXPORT_SYMBOL(ip6t_register_table);
2370 EXPORT_SYMBOL(ip6t_unregister_table);
2371 EXPORT_SYMBOL(ip6t_do_table);
2372 EXPORT_SYMBOL(ip6t_ext_hdr);
2373 EXPORT_SYMBOL(ipv6_find_hdr);
2375 module_init(ip6_tables_init);
2376 module_exit(ip6_tables_fini);