GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / net / ipv6 / netfilter / ip6_tables.c
blobf0dbbf8ebd2b7b8e61b98f51071579cba37e59ec
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/* Compile-time debug switches -- all normally commented out. */
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* dprintf(): packet-path debug logging; compiles away unless
 * DEBUG_IP_FIREWALL is defined. */
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
/* duprintf(): userspace/configuration-path debug logging; compiles away
 * unless DEBUG_IP_FIREWALL_USER is defined. */
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
/* IP_NF_ASSERT(): sanity check that emits a WARN (does not panic) when the
 * condition is false, and is a no-op without CONFIG_NETFILTER_DEBUG. */
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
/* Allocate and populate the initial replace-blob for a built-in ip6 table;
 * thin wrapper that expands xt_alloc_initial_table() with the ip6t/IP6T
 * type prefixes.  Exported for use by the ip6table_* table modules. */
61 void *ip6t_alloc_initial_table(const struct xt_table *info)
63 return xt_alloc_initial_table(ip6t, IP6T);
65 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
74 Hence the start of any table is given by get_table() below. */
76 /* Check for an extension */
77 int
78 ip6t_ext_hdr(u8 nexthdr)
80 return ( (nexthdr == IPPROTO_HOPOPTS) ||
81 (nexthdr == IPPROTO_ROUTING) ||
82 (nexthdr == IPPROTO_FRAGMENT) ||
83 (nexthdr == IPPROTO_ESP) ||
84 (nexthdr == IPPROTO_AH) ||
85 (nexthdr == IPPROTO_NONE) ||
86 (nexthdr == IPPROTO_DSTOPTS) );
89 /* Returns whether matches rule or not. */
90 /* Performance critical - called for every packet */
/* Compares one packet against one rule's IPv6 part: source/destination
 * (masked) addresses, in/out interface names, and the upper-layer protocol,
 * each optionally inverted via ip6info->invflags.  On a protocol match it
 * also reports the transport header offset (*protoff) and fragment offset
 * (*fragoff) back to the caller; *hotdrop is set to request an
 * unconditional drop on malformed non-fragment packets. */
91 static inline bool
92 ip6_packet_match(const struct sk_buff *skb,
93 const char *indev,
94 const char *outdev,
95 const struct ip6t_ip6 *ip6info,
96 unsigned int *protoff,
97 int *fragoff, bool *hotdrop)
99 unsigned long ret;
100 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV(x, flag): result of test x, XORed with the rule's inversion bit. */
102 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
104 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
105 &ip6info->src), IP6T_INV_SRCIP) ||
106 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
107 &ip6info->dst), IP6T_INV_DSTIP)) {
108 dprintf("Source or dest mismatch.\n");
/* NOTE(review): the next six lines are part of a commented-out IPv4-style
 * debug block in the upstream source; the opening "/" "*" (original line
 * 109) was lost in this extraction, which is why the stray "*" "/" appears
 * at the end of original line 115 below. */
110 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
111 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
112 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
113 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
114 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
115 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
116 return false;
/* Interface name comparison honours the rule's wildcard mask. */
119 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
121 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ip6info->iniface,
124 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
125 return false;
128 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
130 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
131 dprintf("VIA out mismatch (%s vs %s).%s\n",
132 outdev, ip6info->outiface,
133 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
134 return false;
137 /* ... might want to do something with class and flowlabel here ... */
139 /* look for the desired protocol header */
140 if((ip6info->flags & IP6T_F_PROTO)) {
141 int protohdr;
142 unsigned short _frag_off;
/* Walk the extension-header chain to the upper-layer protocol.
 * NOTE(review): a negative return with _frag_off == 0 is treated as a
 * malformed non-fragment and hot-dropped -- confirm against
 * ipv6_find_hdr()'s contract. */
144 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
145 if (protohdr < 0) {
146 if (_frag_off == 0)
147 *hotdrop = true;
148 return false;
150 *fragoff = _frag_off;
152 dprintf("Packet protocol %hi ?= %s%hi.\n",
153 protohdr,
154 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
155 ip6info->proto);
157 if (ip6info->proto == protohdr) {
158 if(ip6info->invflags & IP6T_INV_PROTO) {
159 return false;
161 return true;
164 /* We need match for the '-p all', too! */
165 if ((ip6info->proto != 0) &&
166 !(ip6info->invflags & IP6T_INV_PROTO))
167 return false;
169 return true;
172 /* should be ip6 safe */
/* Validates the user-supplied ip6t_ip6 part of a rule: rejects any flag
 * or inversion-flag bits outside the known masks.  Returns true if valid. */
173 static bool
174 ip6_checkentry(const struct ip6t_ip6 *ipv6)
176 if (ipv6->flags & ~IP6T_F_MASK) {
177 duprintf("Unknown flag bits set: %08X\n",
178 ipv6->flags & ~IP6T_F_MASK);
179 return false;
181 if (ipv6->invflags & ~IP6T_INV_MASK) {
182 duprintf("Unknown invflag bits set: %08X\n",
183 ipv6->invflags & ~IP6T_INV_MASK);
184 return false;
186 return true;
/* Target handler for the built-in ERROR target: rate-limited log of the
 * (NUL-terminated) error name carried in targinfo, then drop the packet. */
189 static unsigned int
190 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
192 if (net_ratelimit())
193 pr_info("error: `%s'\n", (const char *)par->targinfo);
195 return NF_DROP;
/* Resolve a rule entry from its byte offset within the table blob. */
198 static inline struct ip6t_entry *
199 get_entry(const void *base, unsigned int offset)
201 return (struct ip6t_entry *)(base + offset);
204 /* All zeroes == unconditional rule. */
205 /* Mildly perf critical (only if packet tracing is on) */
/* A rule with a fully zeroed ip6t_ip6 matches every packet; compare
 * against a static all-zero template. */
206 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
208 static const struct ip6t_ip6 uncond;
210 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct accessor for an entry's target; casts away const only to
 * reuse the non-const ip6t_get_target() helper. */
213 static inline const struct ip6t_entry_target *
214 ip6t_get_target_c(const struct ip6t_entry *e)
216 return ip6t_get_target((struct ip6t_entry *)e);
219 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
220 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
221 /* This cries for unification! */
/* Hook-number -> chain-name strings used in TRACE log lines. */
222 static const char *const hooknames[] = {
223 [NF_INET_PRE_ROUTING] = "PREROUTING",
224 [NF_INET_LOCAL_IN] = "INPUT",
225 [NF_INET_FORWARD] = "FORWARD",
226 [NF_INET_LOCAL_OUT] = "OUTPUT",
227 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Comment kinds that can appear in a TRACE line (rule/return/policy). */
230 enum nf_ip_trace_comments {
231 NF_IP6_TRACE_COMMENT_RULE,
232 NF_IP6_TRACE_COMMENT_RETURN,
233 NF_IP6_TRACE_COMMENT_POLICY,
236 static const char *const comments[] = {
237 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Fixed log parameters for TRACE output (LOG type, level 4, all flags). */
242 static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
244 .u = {
245 .log = {
246 .level = 4,
247 .logflags = NF_LOG_MASK,
252 /* Mildly perf critical (only if packet tracing is on) */
/* Walks rules from the chain head toward entry e, tracking the current
 * chain name and rule number.  Returns 1 (stop iterating) once e is
 * reached or a chain tail (ERROR header / STANDARD return) is passed. */
253 static inline int
254 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
255 const char *hookname, const char **chainname,
256 const char **comment, unsigned int *rulenum)
258 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
260 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
261 /* Head of user chain: ERROR target with chainname */
262 *chainname = t->target.data;
263 (*rulenum) = 0;
264 } else if (s == e) {
265 (*rulenum)++;
267 if (s->target_offset == sizeof(struct ip6t_entry) &&
268 strcmp(t->target.u.kernel.target->name,
269 IP6T_STANDARD_TARGET) == 0 &&
270 t->verdict < 0 &&
271 unconditional(&s->ipv6)) {
272 /* Tail of chains: STANDARD target (return/policy) */
273 *comment = *chainname == hookname
274 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
275 : comments[NF_IP6_TRACE_COMMENT_RETURN];
277 return 1;
278 } else
279 (*rulenum)++;
281 return 0;
/* Emits one "TRACE: table:chain:comment:rulenum" log line for a packet
 * whose skb->nf_trace flag is set, identifying the rule e it matched. */
284 static void trace_packet(const struct sk_buff *skb,
285 unsigned int hook,
286 const struct net_device *in,
287 const struct net_device *out,
288 const char *tablename,
289 const struct xt_table_info *private,
290 const struct ip6t_entry *e)
292 const void *table_base;
293 const struct ip6t_entry *root;
294 const char *hookname, *chainname, *comment;
295 const struct ip6t_entry *iter;
296 unsigned int rulenum = 0;
298 table_base = private->entries[smp_processor_id()];
299 root = get_entry(table_base, private->hook_entry[hook]);
301 hookname = chainname = hooknames[hook];
302 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
304 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
305 if (get_chainname_rulenum(iter, e, hookname,
306 &chainname, &comment, &rulenum) != 0)
307 break;
309 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
310 "TRACE: %s:%s:%s:%u ",
311 tablename, chainname, comment, rulenum);
313 #endif
/* Advance to the entry laid out immediately after this one in the blob
 * (entries are variable-sized; next_offset is the total size of e). */
315 static inline __pure struct ip6t_entry *
316 ip6t_next_entry(const struct ip6t_entry *entry)
318 return (void *)entry + entry->next_offset;
321 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main per-packet evaluation loop: walks the rule blob for the given hook
 * under the per-CPU read lock, evaluating matches and targets until a
 * final verdict is produced.  Jumps into user chains are recorded on a
 * per-CPU jumpstack so RETURN can resume at the caller. */
322 unsigned int
323 ip6t_do_table(struct sk_buff *skb,
324 unsigned int hook,
325 const struct net_device *in,
326 const struct net_device *out,
327 struct xt_table *table)
329 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
330 /* Initializing verdict to NF_DROP keeps gcc happy. */
331 unsigned int verdict = NF_DROP;
332 const char *indev, *outdev;
333 const void *table_base;
334 struct ip6t_entry *e, **jumpstack;
335 unsigned int *stackptr, origptr, cpu;
336 const struct xt_table_info *private;
337 struct xt_action_param acpar;
339 /* Initialization */
340 indev = in ? in->name : nulldevname;
341 outdev = out ? out->name : nulldevname;
342 /* We handle fragments by dealing with the first fragment as
343 * if it was a normal packet. All other fragments are treated
344 * normally, except that they will NEVER match rules that ask
345 * things we don't know, ie. tcp syn flag or ports). If the
346 * rule is also a fragment-specific rule, non-fragments won't
347 * match it. */
348 acpar.hotdrop = false;
349 acpar.in = in;
350 acpar.out = out;
351 acpar.family = NFPROTO_IPV6;
352 acpar.hooknum = hook;
354 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Take this CPU's copy of the rules and its private jump stack. */
356 xt_info_rdlock_bh();
357 private = table->private;
358 cpu = smp_processor_id();
359 table_base = private->entries[cpu];
360 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
361 stackptr = per_cpu_ptr(private->stackptr, cpu);
362 origptr = *stackptr;
364 e = get_entry(table_base, private->hook_entry[hook]);
366 do {
367 const struct ip6t_entry_target *t;
368 const struct xt_entry_match *ematch;
370 IP_NF_ASSERT(e);
371 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
372 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
373 no_match:
374 e = ip6t_next_entry(e);
375 continue;
/* All extension matches must accept the packet as well. */
378 xt_ematch_foreach(ematch, e) {
379 acpar.match = ematch->u.kernel.match;
380 acpar.matchinfo = ematch->data;
381 if (!acpar.match->match(skb, &acpar))
382 goto no_match;
385 ADD_COUNTER(e->counters, skb->len, 1);
387 t = ip6t_get_target_c(e);
388 IP_NF_ASSERT(t->u.kernel.target);
390 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
391 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
392 /* The packet is traced: log it */
393 if (unlikely(skb->nf_trace))
394 trace_packet(skb, hook, in, out,
395 table->name, private, e);
396 #endif
397 /* Standard target? */
398 if (!t->u.kernel.target->target) {
399 int v;
401 v = ((struct ip6t_standard_target *)t)->verdict;
402 if (v < 0) {
403 /* Pop from stack? */
404 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -v - 1. */
405 verdict = (unsigned)(-v) - 1;
406 break;
/* RETURN: pop to caller, or fall to the hook's underflow
 * (base-chain policy) when the stack is empty. */
408 if (*stackptr == 0)
409 e = get_entry(table_base,
410 private->underflow[hook]);
411 else
412 e = ip6t_next_entry(jumpstack[--*stackptr]);
413 continue;
/* Real jump (not a fallthrough, not GOTO): push return point. */
415 if (table_base + v != ip6t_next_entry(e) &&
416 !(e->ipv6.flags & IP6T_F_GOTO)) {
417 if (*stackptr >= private->stacksize) {
418 verdict = NF_DROP;
419 break;
421 jumpstack[(*stackptr)++] = e;
424 e = get_entry(table_base, v);
425 continue;
/* Non-standard target: invoke its handler for the verdict. */
428 acpar.target = t->u.kernel.target;
429 acpar.targinfo = t->data;
431 verdict = t->u.kernel.target->target(skb, &acpar);
432 if (verdict == IP6T_CONTINUE)
433 e = ip6t_next_entry(e);
434 else
435 /* Verdict */
436 break;
437 } while (!acpar.hotdrop);
439 xt_info_rdunlock_bh();
440 *stackptr = origptr;
442 #ifdef DEBUG_ALLOW_ALL
443 return NF_ACCEPT;
444 #else
445 if (acpar.hotdrop)
446 return NF_DROP;
447 else return verdict;
448 #endif
451 /* Figures out from what hook each rule can be called: returns 0 if
452 there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of every chain reachable from each valid hook, without
 * recursion: counters.pcnt temporarily stores the back-pointer for
 * backtracking (reset to 0 on the way out), and comefrom accumulates the
 * hook bitmask plus a "currently on the walk path" bit used for loop
 * detection.  Returns 1 on success, 0 when a rule loop is found.
 * NOTE(review): later upstream kernels added explicit bounds checks on
 * pos/newpos in this function (CVE-2016-3134 hardening); this 2.6.36
 * version relies on the offset checks done elsewhere -- verify before
 * accepting rulesets from less-than-fully-trusted userspace. */
453 static int
454 mark_source_chains(const struct xt_table_info *newinfo,
455 unsigned int valid_hooks, void *entry0)
457 unsigned int hook;
459 /* No recursion; use packet counter to save back ptrs (reset
460 to 0 as we leave), and comefrom to save source hook bitmask */
461 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
462 unsigned int pos = newinfo->hook_entry[hook];
463 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
465 if (!(valid_hooks & (1 << hook)))
466 continue;
468 /* Set initial back pointer. */
469 e->counters.pcnt = pos;
471 for (;;) {
472 const struct ip6t_standard_target *t
473 = (void *)ip6t_get_target_c(e);
474 int visited = e->comefrom & (1 << hook);
476 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
477 pr_err("iptables: loop hook %u pos %u %08X.\n",
478 hook, pos, e->comefrom);
479 return 0;
481 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
483 /* Unconditional return/END. */
484 if ((e->target_offset == sizeof(struct ip6t_entry) &&
485 (strcmp(t->target.u.user.name,
486 IP6T_STANDARD_TARGET) == 0) &&
487 t->verdict < 0 &&
488 unconditional(&e->ipv6)) || visited) {
489 unsigned int oldpos, size;
491 if ((strcmp(t->target.u.user.name,
492 IP6T_STANDARD_TARGET) == 0) &&
493 t->verdict < -NF_MAX_VERDICT - 1) {
494 duprintf("mark_source_chains: bad "
495 "negative verdict (%i)\n",
496 t->verdict);
497 return 0;
500 /* Return: backtrack through the last
501 big jump. */
502 do {
/* Clear the on-path bit while unwinding. */
503 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
504 #ifdef DEBUG_IP_FIREWALL_USER
505 if (e->comefrom
506 & (1 << NF_INET_NUMHOOKS)) {
507 duprintf("Back unset "
508 "on hook %u "
509 "rule %u\n",
510 hook, pos);
512 #endif
513 oldpos = pos;
514 pos = e->counters.pcnt;
515 e->counters.pcnt = 0;
517 /* We're at the start. */
518 if (pos == oldpos)
519 goto next;
521 e = (struct ip6t_entry *)
522 (entry0 + pos);
523 } while (oldpos == pos + e->next_offset);
525 /* Move along one */
526 size = e->next_offset;
527 e = (struct ip6t_entry *)
528 (entry0 + pos + size);
529 e->counters.pcnt = pos;
530 pos += size;
531 } else {
532 int newpos = t->verdict;
534 if (strcmp(t->target.u.user.name,
535 IP6T_STANDARD_TARGET) == 0 &&
536 newpos >= 0) {
/* Non-negative standard verdict == absolute jump offset. */
537 if (newpos > newinfo->size -
538 sizeof(struct ip6t_entry)) {
539 duprintf("mark_source_chains: "
540 "bad verdict (%i)\n",
541 newpos);
542 return 0;
544 /* This a jump; chase it. */
545 duprintf("Jump rule %u -> %u\n",
546 pos, newpos);
547 } else {
548 /* ... this is a fallthru */
549 newpos = pos + e->next_offset;
551 e = (struct ip6t_entry *)
552 (entry0 + newpos);
553 e->counters.pcnt = pos;
554 pos = newpos;
557 next:
558 duprintf("Finished chain %u\n", hook);
560 return 1;
/* Destructor for one bound match extension: run its destroy hook (if any)
 * and drop the module reference taken when the match was found. */
563 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
565 struct xt_mtdtor_param par;
567 par.net = net;
568 par.match = m->u.kernel.match;
569 par.matchinfo = m->data;
570 par.family = NFPROTO_IPV6;
571 if (par.match->destroy != NULL)
572 par.match->destroy(&par);
573 module_put(par.match->me);
/* Structural validation of a single user-supplied rule: the ip6t_ip6 part
 * must be sane, and target_offset/target size must leave the target fully
 * inside the entry (target_offset < ... <= next_offset).  Returns 0 or
 * -EINVAL. */
576 static int
577 check_entry(const struct ip6t_entry *e, const char *name)
579 const struct ip6t_entry_target *t;
581 if (!ip6_checkentry(&e->ipv6)) {
582 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
583 return -EINVAL;
/* Target header must fit between target_offset and next_offset. */
586 if (e->target_offset + sizeof(struct ip6t_entry_target) >
587 e->next_offset)
588 return -EINVAL;
590 t = ip6t_get_target_c(e);
591 if (e->target_offset + t->u.target_size > e->next_offset)
592 return -EINVAL;
594 return 0;
597 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
599 const struct ip6t_ip6 *ipv6 = par->entryinfo;
600 int ret;
602 par->match = m->u.kernel.match;
603 par->matchinfo = m->data;
605 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
606 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
607 if (ret < 0) {
608 duprintf("ip_tables: check failed for `%s'.\n",
609 par.match->name);
610 return ret;
612 return 0;
/* Look up the match extension named in the user blob (taking a module
 * reference), bind it to the rule, and validate it via check_match().
 * On validation failure the module reference is dropped before return. */
615 static int
616 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
618 struct xt_match *match;
619 int ret;
621 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
622 m->u.user.revision);
623 if (IS_ERR(match)) {
624 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
625 return PTR_ERR(match);
627 m->u.kernel.match = match;
629 ret = check_match(m, par);
630 if (ret)
631 goto err;
633 return 0;
634 err:
635 module_put(m->u.kernel.match->me);
636 return ret;
639 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
641 struct ip6t_entry_target *t = ip6t_get_target(e);
642 struct xt_tgchk_param par = {
643 .net = net,
644 .table = name,
645 .entryinfo = e,
646 .target = t->u.kernel.target,
647 .targinfo = t->data,
648 .hook_mask = e->comefrom,
649 .family = NFPROTO_IPV6,
651 int ret;
653 t = ip6t_get_target(e);
654 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
655 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
656 if (ret < 0) {
657 duprintf("ip_tables: check failed for `%s'.\n",
658 t->u.kernel.target->name);
659 return ret;
661 return 0;
/* Full validation of one rule: structural check, then resolve+check every
 * match extension (counting successes in j for partial cleanup), then
 * resolve+check the target.  On any failure, already-bound matches (and
 * the target module reference, if taken) are released. */
664 static int
665 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
666 unsigned int size)
668 struct ip6t_entry_target *t;
669 struct xt_target *target;
670 int ret;
671 unsigned int j;
672 struct xt_mtchk_param mtpar;
673 struct xt_entry_match *ematch;
675 ret = check_entry(e, name);
676 if (ret)
677 return ret;
/* j counts matches successfully bound, so cleanup stops at the failure. */
679 j = 0;
680 mtpar.net = net;
681 mtpar.table = name;
682 mtpar.entryinfo = &e->ipv6;
683 mtpar.hook_mask = e->comefrom;
684 mtpar.family = NFPROTO_IPV6;
685 xt_ematch_foreach(ematch, e) {
686 ret = find_check_match(ematch, &mtpar);
687 if (ret != 0)
688 goto cleanup_matches;
689 ++j;
692 t = ip6t_get_target(e);
693 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
694 t->u.user.revision);
695 if (IS_ERR(target)) {
696 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
697 ret = PTR_ERR(target);
698 goto cleanup_matches;
700 t->u.kernel.target = target;
702 ret = check_target(e, net, name);
703 if (ret)
704 goto err;
705 return 0;
706 err:
707 module_put(t->u.kernel.target->me);
708 cleanup_matches:
709 xt_ematch_foreach(ematch, e) {
710 if (j-- == 0)
711 break;
712 cleanup_match(ematch, net);
714 return ret;
/* An underflow (base-chain policy) entry must be an unconditional rule
 * whose target is STANDARD with a final ACCEPT or DROP verdict; negative
 * verdicts encode NF_* values as -verdict - 1. */
717 static bool check_underflow(const struct ip6t_entry *e)
719 const struct ip6t_entry_target *t;
720 unsigned int verdict;
722 if (!unconditional(&e->ipv6))
723 return false;
724 t = ip6t_get_target_c(e);
725 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
726 return false;
727 verdict = ((struct ip6t_standard_target *)t)->verdict;
728 verdict = -verdict - 1;
729 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* First-pass validation of an entry while walking the user blob: checks
 * alignment and minimum size, records hook-entry/underflow offsets that
 * land on this entry (underflows additionally via check_underflow()), and
 * zeroes the counters and comefrom fields for the later passes.
 * NOTE(review): later upstream kernels also verify target_offset /
 * next_offset against 'limit' here (CVE-2016-4997-era hardening); this
 * version checks only the minimum next_offset -- confirm trust model. */
732 static int
733 check_entry_size_and_hooks(struct ip6t_entry *e,
734 struct xt_table_info *newinfo,
735 const unsigned char *base,
736 const unsigned char *limit,
737 const unsigned int *hook_entries,
738 const unsigned int *underflows,
739 unsigned int valid_hooks)
741 unsigned int h;
743 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
744 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
745 duprintf("Bad offset %p\n", e);
746 return -EINVAL;
749 if (e->next_offset
750 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
751 duprintf("checking: element %p size %u\n",
752 e, e->next_offset);
753 return -EINVAL;
756 /* Check hooks & underflows */
757 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
758 if (!(valid_hooks & (1 << h)))
759 continue;
760 if ((unsigned char *)e - base == hook_entries[h])
761 newinfo->hook_entry[h] = hook_entries[h];
762 if ((unsigned char *)e - base == underflows[h]) {
763 if (!check_underflow(e)) {
764 pr_err("Underflows must be unconditional and "
765 "use the STANDARD target with "
766 "ACCEPT/DROP\n");
767 return -EINVAL;
769 newinfo->underflow[h] = underflows[h];
773 /* Clear counters and comefrom */
774 e->counters = ((struct xt_counters) { 0, 0 });
775 e->comefrom = 0;
776 return 0;
/* Full destructor for one rule: tear down every match extension, then the
 * target (destroy hook if present, then module reference). */
779 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
781 struct xt_tgdtor_param par;
782 struct ip6t_entry_target *t;
783 struct xt_entry_match *ematch;
785 /* Cleanup all matches */
786 xt_ematch_foreach(ematch, e)
787 cleanup_match(ematch, net);
788 t = ip6t_get_target(e);
790 par.net = net;
791 par.target = t->u.kernel.target;
792 par.targinfo = t->data;
793 par.family = NFPROTO_IPV6;
794 if (par.target->destroy != NULL)
795 par.target->destroy(&par);
796 module_put(par.target->me);
799 /* Checks and translates the user-supplied table segment (held in
800 newinfo) */
/* Multi-pass validation of a whole replacement table: (1) size/offset and
 * hook/underflow bookkeeping per entry, (2) every hook assigned, (3) loop
 * detection via mark_source_chains(), (4) per-entry match/target binding
 * via find_check_entry() with rollback of already-checked entries on
 * failure; finally the validated blob is copied to every other CPU. */
801 static int
802 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
803 const struct ip6t_replace *repl)
805 struct ip6t_entry *iter;
806 unsigned int i;
807 int ret = 0;
809 newinfo->size = repl->size;
810 newinfo->number = repl->num_entries;
812 /* Init all hooks to impossible value. */
813 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
814 newinfo->hook_entry[i] = 0xFFFFFFFF;
815 newinfo->underflow[i] = 0xFFFFFFFF;
818 duprintf("translate_table: size %u\n", newinfo->size);
819 i = 0;
820 /* Walk through entries, checking offsets. */
821 xt_entry_foreach(iter, entry0, newinfo->size) {
822 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
823 entry0 + repl->size,
824 repl->hook_entry,
825 repl->underflow,
826 repl->valid_hooks);
827 if (ret != 0)
828 return ret;
829 ++i;
/* Each user chain head (ERROR target) deepens the jump stack by one. */
830 if (strcmp(ip6t_get_target(iter)->u.user.name,
831 XT_ERROR_TARGET) == 0)
832 ++newinfo->stacksize;
835 if (i != repl->num_entries) {
836 duprintf("translate_table: %u not %u entries\n",
837 i, repl->num_entries);
838 return -EINVAL;
841 /* Check hooks all assigned */
842 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
843 /* Only hooks which are valid */
844 if (!(repl->valid_hooks & (1 << i)))
845 continue;
846 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
847 duprintf("Invalid hook entry %u %u\n",
848 i, repl->hook_entry[i]);
849 return -EINVAL;
851 if (newinfo->underflow[i] == 0xFFFFFFFF) {
852 duprintf("Invalid underflow %u %u\n",
853 i, repl->underflow[i]);
854 return -EINVAL;
858 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
859 return -ELOOP;
861 /* Finally, each sanity check must pass */
862 i = 0;
863 xt_entry_foreach(iter, entry0, newinfo->size) {
864 ret = find_check_entry(iter, net, repl->name, repl->size);
865 if (ret != 0)
866 break;
867 ++i;
/* On failure, unwind only the i entries that passed find_check_entry(). */
870 if (ret != 0) {
871 xt_entry_foreach(iter, entry0, newinfo->size) {
872 if (i-- == 0)
873 break;
874 cleanup_entry(iter, net);
876 return ret;
879 /* And one copy for every other CPU */
880 for_each_possible_cpu(i) {
881 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
882 memcpy(newinfo->entries[i], entry0, newinfo->size);
885 return ret;
/* Snapshot per-rule packet/byte counters across all CPUs into the
 * caller-supplied array: the current CPU's values are SET first, then
 * every other CPU's copy is ADDed under that CPU's xt write lock. */
888 static void
889 get_counters(const struct xt_table_info *t,
890 struct xt_counters counters[])
892 struct ip6t_entry *iter;
893 unsigned int cpu;
894 unsigned int i;
895 unsigned int curcpu = get_cpu();
897 /* Instead of clearing (by a previous call to memset())
898 * the counters and using adds, we set the counters
899 * with data used by 'current' CPU
901 * Bottom half has to be disabled to prevent deadlock
902 * if new softirq were to run and call ipt_do_table
904 local_bh_disable();
905 i = 0;
906 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
907 SET_COUNTER(counters[i], iter->counters.bcnt,
908 iter->counters.pcnt);
909 ++i;
911 local_bh_enable();
912 /* Processing counters from other cpus, we can let bottom half enabled,
913 * (preemption is disabled)
916 for_each_possible_cpu(cpu) {
917 if (cpu == curcpu)
918 continue;
919 i = 0;
920 local_bh_disable();
921 xt_info_wrlock(cpu);
922 xt_entry_foreach(iter, t->entries[cpu], t->size) {
923 ADD_COUNTER(counters[i], iter->counters.bcnt,
924 iter->counters.pcnt);
925 ++i;
927 xt_info_wrunlock(cpu);
928 local_bh_enable();
930 put_cpu();
/* Allocate a counter array sized for every rule in the table and fill it
 * with an atomic snapshot via get_counters().  Returns the array or
 * ERR_PTR(-ENOMEM); caller owns (and must vfree) the result. */
933 static struct xt_counters *alloc_counters(const struct xt_table *table)
935 unsigned int countersize;
936 struct xt_counters *counters;
937 const struct xt_table_info *private = table->private;
939 /* We need atomic snapshot of counters: rest doesn't change
940 (other than comefrom, which userspace doesn't care
941 about). */
942 countersize = sizeof(struct xt_counters) * private->number;
943 counters = vmalloc(countersize);
945 if (counters == NULL)
946 return ERR_PTR(-ENOMEM);
948 get_counters(private, counters);
950 return counters;
/* Copy the whole rule blob to userspace, then patch it up in place:
 * per-rule counters are overwritten with the all-CPU snapshot, and the
 * kernel-internal match/target pointers are replaced by the extension
 * names userspace expects. */
953 static int
954 copy_entries_to_user(unsigned int total_size,
955 const struct xt_table *table,
956 void __user *userptr)
958 unsigned int off, num;
959 const struct ip6t_entry *e;
960 struct xt_counters *counters;
961 const struct xt_table_info *private = table->private;
962 int ret = 0;
963 const void *loc_cpu_entry;
965 counters = alloc_counters(table);
966 if (IS_ERR(counters))
967 return PTR_ERR(counters);
969 /* choose the copy that is on our node/cpu, ...
970 * This choice is lazy (because current thread is
971 * allowed to migrate to another cpu)
973 loc_cpu_entry = private->entries[raw_smp_processor_id()];
974 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
975 ret = -EFAULT;
976 goto free_counters;
979 /* ... then go back and fix counters and names */
980 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
981 unsigned int i;
982 const struct ip6t_entry_match *m;
983 const struct ip6t_entry_target *t;
985 e = (struct ip6t_entry *)(loc_cpu_entry + off);
986 if (copy_to_user(userptr + off
987 + offsetof(struct ip6t_entry, counters),
988 &counters[num],
989 sizeof(counters[num])) != 0) {
990 ret = -EFAULT;
991 goto free_counters;
/* Rewrite each match's kernel pointer as its user-visible name. */
994 for (i = sizeof(struct ip6t_entry);
995 i < e->target_offset;
996 i += m->u.match_size) {
997 m = (void *)e + i;
999 if (copy_to_user(userptr + off + i
1000 + offsetof(struct ip6t_entry_match,
1001 u.user.name),
1002 m->u.kernel.match->name,
1003 strlen(m->u.kernel.match->name)+1)
1004 != 0) {
1005 ret = -EFAULT;
1006 goto free_counters;
/* Same for the target name. */
1010 t = ip6t_get_target_c(e);
1011 if (copy_to_user(userptr + off + e->target_offset
1012 + offsetof(struct ip6t_entry_target,
1013 u.user.name),
1014 t->u.kernel.target->name,
1015 strlen(t->u.kernel.target->name)+1) != 0) {
1016 ret = -EFAULT;
1017 goto free_counters;
1021 free_counters:
1022 vfree(counters);
1023 return ret;
1026 #ifdef CONFIG_COMPAT
/* 32-bit compat: translate a standard-target verdict from userspace;
 * positive verdicts are jump offsets and must be adjusted by the
 * compat->native delta computed by xt_compat_calc_jump(). */
1027 static void compat_standard_from_user(void *dst, const void *src)
1029 int v = *(compat_int_t *)src;
1031 if (v > 0)
1032 v += xt_compat_calc_jump(AF_INET6, v);
1033 memcpy(dst, &v, sizeof(v));
/* Reverse direction: subtract the delta when copying a verdict out to a
 * 32-bit userspace. */
1036 static int compat_standard_to_user(void __user *dst, const void *src)
1038 compat_int_t cv = *(int *)src;
1040 if (cv > 0)
1041 cv -= xt_compat_calc_jump(AF_INET6, cv);
1042 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* For one native entry, compute how much smaller its 32-bit compat image
 * is (entry header plus every match/target offset delta), register that
 * delta with the compat layer, and shrink newinfo's size and any
 * hook/underflow offsets that lie beyond this entry. */
1045 static int compat_calc_entry(const struct ip6t_entry *e,
1046 const struct xt_table_info *info,
1047 const void *base, struct xt_table_info *newinfo)
1049 const struct xt_entry_match *ematch;
1050 const struct ip6t_entry_target *t;
1051 unsigned int entry_offset;
1052 int off, i, ret;
1054 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1055 entry_offset = (void *)e - base;
1056 xt_ematch_foreach(ematch, e)
1057 off += xt_compat_match_offset(ematch->u.kernel.match);
1058 t = ip6t_get_target_c(e);
1059 off += xt_compat_target_offset(t->u.kernel.target);
1060 newinfo->size -= off;
1061 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1062 if (ret)
1063 return ret;
1065 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1066 if (info->hook_entry[i] &&
1067 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1068 newinfo->hook_entry[i] -= off;
1069 if (info->underflow[i] &&
1070 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1071 newinfo->underflow[i] -= off;
1073 return 0;
/* Build the 32-bit compat view of a table's metadata by cloning info
 * (minus the per-CPU entries array) and folding in every entry's compat
 * size delta via compat_calc_entry(). */
1076 static int compat_table_info(const struct xt_table_info *info,
1077 struct xt_table_info *newinfo)
1079 struct ip6t_entry *iter;
1080 void *loc_cpu_entry;
1081 int ret;
1083 if (!newinfo || !info)
1084 return -EINVAL;
1086 /* we dont care about newinfo->entries[] */
1087 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1088 newinfo->initial_entries = 0;
1089 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1090 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1091 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1092 if (ret != 0)
1093 return ret;
1095 return 0;
1097 #endif
/* IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * module if needed) and return its hook entries, underflows, entry count
 * and size to userspace; under compat, sizes are first translated to the
 * 32-bit view. */
1099 static int get_info(struct net *net, void __user *user,
1100 const int *len, int compat)
1102 char name[IP6T_TABLE_MAXNAMELEN];
1103 struct xt_table *t;
1104 int ret;
1106 if (*len != sizeof(struct ip6t_getinfo)) {
1107 duprintf("length %u != %zu\n", *len,
1108 sizeof(struct ip6t_getinfo));
1109 return -EINVAL;
1112 if (copy_from_user(name, user, sizeof(name)) != 0)
1113 return -EFAULT;
/* Force NUL-termination of the user-supplied table name. */
1115 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1116 #ifdef CONFIG_COMPAT
1117 if (compat)
1118 xt_compat_lock(AF_INET6);
1119 #endif
1120 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1121 "ip6table_%s", name);
1122 if (t && !IS_ERR(t)) {
1123 struct ip6t_getinfo info;
1124 const struct xt_table_info *private = t->private;
1125 #ifdef CONFIG_COMPAT
1126 struct xt_table_info tmp;
1128 if (compat) {
1129 ret = compat_table_info(private, &tmp);
1130 xt_compat_flush_offsets(AF_INET6);
1131 private = &tmp;
1133 #endif
1134 info.valid_hooks = t->valid_hooks;
1135 memcpy(info.hook_entry, private->hook_entry,
1136 sizeof(info.hook_entry));
1137 memcpy(info.underflow, private->underflow,
1138 sizeof(info.underflow));
1139 info.num_entries = private->number;
1140 info.size = private->size;
1141 strcpy(info.name, name);
1143 if (copy_to_user(user, &info, *len) != 0)
1144 ret = -EFAULT;
1145 else
1146 ret = 0;
1148 xt_table_unlock(t);
1149 module_put(t->me);
1150 } else
1151 ret = t ? PTR_ERR(t) : -ENOENT;
1152 #ifdef CONFIG_COMPAT
1153 if (compat)
1154 xt_compat_unlock(AF_INET6);
1155 #endif
1156 return ret;
/* IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and, if it matches exactly, stream the rules to userspace
 * via copy_entries_to_user(); a size mismatch yields -EAGAIN so the
 * caller can re-query with get_info(). */
1159 static int
1160 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1161 const int *len)
1163 int ret;
1164 struct ip6t_get_entries get;
1165 struct xt_table *t;
1167 if (*len < sizeof(get)) {
1168 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1169 return -EINVAL;
1171 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1172 return -EFAULT;
1173 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1174 duprintf("get_entries: %u != %zu\n",
1175 *len, sizeof(get) + get.size);
1176 return -EINVAL;
1179 t = xt_find_table_lock(net, AF_INET6, get.name);
1180 if (t && !IS_ERR(t)) {
1181 struct xt_table_info *private = t->private;
1182 duprintf("t->private->number = %u\n", private->number);
1183 if (get.size == private->size)
1184 ret = copy_entries_to_user(private->size,
1185 t, uptr->entrytable);
1186 else {
1187 duprintf("get_entries: I've got %u not %u!\n",
1188 private->size, get.size);
1189 ret = -EAGAIN;
1191 module_put(t->me);
1192 xt_table_unlock(t);
1193 } else
1194 ret = t ? PTR_ERR(t) : -ENOENT;
1196 return ret;
1199 static int
1200 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1201 struct xt_table_info *newinfo, unsigned int num_counters,
1202 void __user *counters_ptr)
1204 int ret;
1205 struct xt_table *t;
1206 struct xt_table_info *oldinfo;
1207 struct xt_counters *counters;
1208 const void *loc_cpu_old_entry;
1209 struct ip6t_entry *iter;
1211 ret = 0;
1212 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1213 if (!counters) {
1214 ret = -ENOMEM;
1215 goto out;
1218 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1219 "ip6table_%s", name);
1220 if (!t || IS_ERR(t)) {
1221 ret = t ? PTR_ERR(t) : -ENOENT;
1222 goto free_newinfo_counters_untrans;
1225 /* You lied! */
1226 if (valid_hooks != t->valid_hooks) {
1227 duprintf("Valid hook crap: %08X vs %08X\n",
1228 valid_hooks, t->valid_hooks);
1229 ret = -EINVAL;
1230 goto put_module;
1233 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1234 if (!oldinfo)
1235 goto put_module;
1237 /* Update module usage count based on number of rules */
1238 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1239 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1240 if ((oldinfo->number > oldinfo->initial_entries) ||
1241 (newinfo->number <= oldinfo->initial_entries))
1242 module_put(t->me);
1243 if ((oldinfo->number > oldinfo->initial_entries) &&
1244 (newinfo->number <= oldinfo->initial_entries))
1245 module_put(t->me);
1247 /* Get the old counters, and synchronize with replace */
1248 get_counters(oldinfo, counters);
1250 /* Decrease module usage counts and free resource */
1251 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1252 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1253 cleanup_entry(iter, net);
1255 xt_free_table_info(oldinfo);
1256 if (copy_to_user(counters_ptr, counters,
1257 sizeof(struct xt_counters) * num_counters) != 0)
1258 ret = -EFAULT;
1259 vfree(counters);
1260 xt_table_unlock(t);
1261 return ret;
1263 put_module:
1264 module_put(t->me);
1265 xt_table_unlock(t);
1266 free_newinfo_counters_untrans:
1267 vfree(counters);
1268 out:
1269 return ret;
1272 static int
1273 do_replace(struct net *net, const void __user *user, unsigned int len)
1275 int ret;
1276 struct ip6t_replace tmp;
1277 struct xt_table_info *newinfo;
1278 void *loc_cpu_entry;
1279 struct ip6t_entry *iter;
1281 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1282 return -EFAULT;
1284 /* overflow check */
1285 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1286 return -ENOMEM;
1288 newinfo = xt_alloc_table_info(tmp.size);
1289 if (!newinfo)
1290 return -ENOMEM;
1292 /* choose the copy that is on our node/cpu */
1293 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1294 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1295 tmp.size) != 0) {
1296 ret = -EFAULT;
1297 goto free_newinfo;
1300 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1301 if (ret != 0)
1302 goto free_newinfo;
1304 duprintf("ip_tables: Translated table\n");
1306 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1307 tmp.num_counters, tmp.counters);
1308 if (ret)
1309 goto free_newinfo_untrans;
1310 return 0;
1312 free_newinfo_untrans:
1313 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1314 cleanup_entry(iter, net);
1315 free_newinfo:
1316 xt_free_table_info(newinfo);
1317 return ret;
1320 static int
1321 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1322 int compat)
1324 unsigned int i, curcpu;
1325 struct xt_counters_info tmp;
1326 struct xt_counters *paddc;
1327 unsigned int num_counters;
1328 char *name;
1329 int size;
1330 void *ptmp;
1331 struct xt_table *t;
1332 const struct xt_table_info *private;
1333 int ret = 0;
1334 const void *loc_cpu_entry;
1335 struct ip6t_entry *iter;
1336 #ifdef CONFIG_COMPAT
1337 struct compat_xt_counters_info compat_tmp;
1339 if (compat) {
1340 ptmp = &compat_tmp;
1341 size = sizeof(struct compat_xt_counters_info);
1342 } else
1343 #endif
1345 ptmp = &tmp;
1346 size = sizeof(struct xt_counters_info);
1349 if (copy_from_user(ptmp, user, size) != 0)
1350 return -EFAULT;
1352 #ifdef CONFIG_COMPAT
1353 if (compat) {
1354 num_counters = compat_tmp.num_counters;
1355 name = compat_tmp.name;
1356 } else
1357 #endif
1359 num_counters = tmp.num_counters;
1360 name = tmp.name;
1363 if (len != size + num_counters * sizeof(struct xt_counters))
1364 return -EINVAL;
1366 paddc = vmalloc(len - size);
1367 if (!paddc)
1368 return -ENOMEM;
1370 if (copy_from_user(paddc, user + size, len - size) != 0) {
1371 ret = -EFAULT;
1372 goto free;
1375 t = xt_find_table_lock(net, AF_INET6, name);
1376 if (!t || IS_ERR(t)) {
1377 ret = t ? PTR_ERR(t) : -ENOENT;
1378 goto free;
1382 local_bh_disable();
1383 private = t->private;
1384 if (private->number != num_counters) {
1385 ret = -EINVAL;
1386 goto unlock_up_free;
1389 i = 0;
1390 /* Choose the copy that is on our node */
1391 curcpu = smp_processor_id();
1392 xt_info_wrlock(curcpu);
1393 loc_cpu_entry = private->entries[curcpu];
1394 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1395 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1396 ++i;
1398 xt_info_wrunlock(curcpu);
1400 unlock_up_free:
1401 local_bh_enable();
1402 xt_table_unlock(t);
1403 module_put(t->me);
1404 free:
1405 vfree(paddc);
1407 return ret;
1410 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: identical except that the
 * counters pointer is a compat_uptr_t and the trailing entries use the
 * compat entry layout. */
struct compat_ip6t_replace {
	char			name[IP6T_TABLE_MAXNAMELEN];
	u32			valid_hooks;	/* which hooks the entries cover */
	u32			num_entries;
	u32			size;		/* total size of entries[] */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];	/* flexible trailing array */
};
/*
 * Convert one native rule to the compat (32-bit) layout and copy it to
 * userspace at *dstptr, advancing *dstptr and shrinking *size by the
 * native-vs-compat layout difference as matches and the target are
 * converted.  counters[i] replaces the rule's kernel counters in the copy.
 *
 * Returns 0 on success, -EFAULT on copy failure, or the error from a
 * match/target conversion helper.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Copy the fixed header, then overwrite the counters field with the
	 * snapshot taken for this rule. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; the offsets
	 * stored in the compat entry must account for it. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1462 static int
1463 compat_find_calc_match(struct ip6t_entry_match *m,
1464 const char *name,
1465 const struct ip6t_ip6 *ipv6,
1466 unsigned int hookmask,
1467 int *size)
1469 struct xt_match *match;
1471 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1472 m->u.user.revision);
1473 if (IS_ERR(match)) {
1474 duprintf("compat_check_calc_match: `%s' not found\n",
1475 m->u.user.name);
1476 return PTR_ERR(match);
1478 m->u.kernel.match = match;
1479 *size += xt_compat_match_offset(match);
1480 return 0;
1483 static void compat_release_entry(struct compat_ip6t_entry *e)
1485 struct ip6t_entry_target *t;
1486 struct xt_entry_match *ematch;
1488 /* Cleanup all matches */
1489 xt_ematch_foreach(ematch, e)
1490 module_put(ematch->u.kernel.match->me);
1491 t = compat_ip6t_get_target(e);
1492 module_put(t->u.kernel.target->me);
/*
 * First pass over one compat rule: validate alignment and bounds, resolve
 * (and pin) all match and target modules, register the compat->native
 * offset for this entry, and record hook entry/underflow positions that
 * fall on this rule.  On any failure every module reference taken so far
 * is released before returning.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be properly aligned and leave room for at least the
	 * fixed header before the blob's end. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much larger the native form of this rule is
	 * than the compat form. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* number of matches whose module ref we now hold */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* Release only the first j matches — those actually resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
/*
 * Second pass: expand one validated compat rule into its native layout at
 * *dstptr, growing *size by the layout difference and fixing up the
 * rule-relative target/next offsets plus any hook entry/underflow offsets
 * in @newinfo that lie beyond this rule.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	/* Fixed header copies straight across; counters are re-copied to
	 * land at the native struct's (different) counters offset. */
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth: offsets move outward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
/*
 * Third pass: run the ->checkentry hooks of every match and the target of
 * one already-expanded native rule.  On failure, only the matches whose
 * check succeeded are cleaned up here; the caller handles the rest.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;	/* matches successfully checked so far */
	mtpar.net	= net;
	mtpar.table	= name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family	= NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
/*
 * Translate a complete ruleset from the compat (32-bit) layout into the
 * native layout.  Three passes: (1) validate each compat entry and pin its
 * extensions under the compat lock, (2) expand entries into a freshly
 * allocated native table, (3) run the extension check hooks.  On success
 * *pinfo/*pentry0 are replaced with the native table (the old info is
 * freed); on failure all pinned module references are released.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries validated (and thus holding module refs) */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	/* Offsets recorded in pass 1 are no longer needed. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* entries whose ->check hooks have run */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	/* fall through: the first j compat entries still hold module refs */
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1816 static int
1817 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1819 int ret;
1820 struct compat_ip6t_replace tmp;
1821 struct xt_table_info *newinfo;
1822 void *loc_cpu_entry;
1823 struct ip6t_entry *iter;
1825 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1826 return -EFAULT;
1828 /* overflow check */
1829 if (tmp.size >= INT_MAX / num_possible_cpus())
1830 return -ENOMEM;
1831 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1832 return -ENOMEM;
1834 newinfo = xt_alloc_table_info(tmp.size);
1835 if (!newinfo)
1836 return -ENOMEM;
1838 /* choose the copy that is on our node/cpu */
1839 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1840 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1841 tmp.size) != 0) {
1842 ret = -EFAULT;
1843 goto free_newinfo;
1846 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1847 &newinfo, &loc_cpu_entry, tmp.size,
1848 tmp.num_entries, tmp.hook_entry,
1849 tmp.underflow);
1850 if (ret != 0)
1851 goto free_newinfo;
1853 duprintf("compat_do_replace: Translated table\n");
1855 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1856 tmp.num_counters, compat_ptr(tmp.counters));
1857 if (ret)
1858 goto free_newinfo_untrans;
1859 return 0;
1861 free_newinfo_untrans:
1862 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1863 cleanup_entry(iter, net);
1864 free_newinfo:
1865 xt_free_table_info(newinfo);
1866 return ret;
1869 static int
1870 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1871 unsigned int len)
1873 int ret;
1875 if (!capable(CAP_NET_ADMIN))
1876 return -EPERM;
1878 switch (cmd) {
1879 case IP6T_SO_SET_REPLACE:
1880 ret = compat_do_replace(sock_net(sk), user, len);
1881 break;
1883 case IP6T_SO_SET_ADD_COUNTERS:
1884 ret = do_add_counters(sock_net(sk), user, len, 1);
1885 break;
1887 default:
1888 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1889 ret = -EINVAL;
1892 return ret;
/* 32-bit userspace layout of struct ip6t_get_entries. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;			/* total size of entrytable[] */
	struct compat_ip6t_entry entrytable[0];	/* flexible trailing array */
};
/*
 * Copy the whole active ruleset to 32-bit userspace, converting each rule
 * with compat_copy_entry_to_user() and substituting a fresh counter
 * snapshot for the kernel counters.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1936 static int
1937 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1938 int *len)
1940 int ret;
1941 struct compat_ip6t_get_entries get;
1942 struct xt_table *t;
1944 if (*len < sizeof(get)) {
1945 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1946 return -EINVAL;
1949 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1950 return -EFAULT;
1952 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1953 duprintf("compat_get_entries: %u != %zu\n",
1954 *len, sizeof(get) + get.size);
1955 return -EINVAL;
1958 xt_compat_lock(AF_INET6);
1959 t = xt_find_table_lock(net, AF_INET6, get.name);
1960 if (t && !IS_ERR(t)) {
1961 const struct xt_table_info *private = t->private;
1962 struct xt_table_info info;
1963 duprintf("t->private->number = %u\n", private->number);
1964 ret = compat_table_info(private, &info);
1965 if (!ret && get.size == info.size) {
1966 ret = compat_copy_entries_to_user(private->size,
1967 t, uptr->entrytable);
1968 } else if (!ret) {
1969 duprintf("compat_get_entries: I've got %u not %u!\n",
1970 private->size, get.size);
1971 ret = -EAGAIN;
1973 xt_compat_flush_offsets(AF_INET6);
1974 module_put(t->me);
1975 xt_table_unlock(t);
1976 } else
1977 ret = t ? PTR_ERR(t) : -ENOENT;
1979 xt_compat_unlock(AF_INET6);
1980 return ret;
1983 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1985 static int
1986 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1988 int ret;
1990 if (!capable(CAP_NET_ADMIN))
1991 return -EPERM;
1993 switch (cmd) {
1994 case IP6T_SO_GET_INFO:
1995 ret = get_info(sock_net(sk), user, len, 1);
1996 break;
1997 case IP6T_SO_GET_ENTRIES:
1998 ret = compat_get_entries(sock_net(sk), user, len);
1999 break;
2000 default:
2001 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2003 return ret;
2005 #endif
2007 static int
2008 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2010 int ret;
2012 if (!capable(CAP_NET_ADMIN))
2013 return -EPERM;
2015 switch (cmd) {
2016 case IP6T_SO_SET_REPLACE:
2017 ret = do_replace(sock_net(sk), user, len);
2018 break;
2020 case IP6T_SO_SET_ADD_COUNTERS:
2021 ret = do_add_counters(sock_net(sk), user, len, 0);
2022 break;
2024 default:
2025 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2026 ret = -EINVAL;
2029 return ret;
/*
 * Native getsockopt entry point: GET_INFO, GET_ENTRIES, and extension
 * revision queries (which auto-load the extension module if needed).
 * Requires CAP_NET_ADMIN.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;	/* 1 = target lookup, 0 = match lookup */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() fills ret; retry after requesting the
		 * "ip6t_<name>" module if the extension isn't loaded yet. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2083 struct xt_table *ip6t_register_table(struct net *net,
2084 const struct xt_table *table,
2085 const struct ip6t_replace *repl)
2087 int ret;
2088 struct xt_table_info *newinfo;
2089 struct xt_table_info bootstrap = {0};
2090 void *loc_cpu_entry;
2091 struct xt_table *new_table;
2093 newinfo = xt_alloc_table_info(repl->size);
2094 if (!newinfo) {
2095 ret = -ENOMEM;
2096 goto out;
2099 /* choose the copy on our node/cpu, but dont care about preemption */
2100 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2101 memcpy(loc_cpu_entry, repl->entries, repl->size);
2103 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2104 if (ret != 0)
2105 goto out_free;
2107 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2108 if (IS_ERR(new_table)) {
2109 ret = PTR_ERR(new_table);
2110 goto out_free;
2112 return new_table;
2114 out_free:
2115 xt_free_table_info(newinfo);
2116 out:
2117 return ERR_PTR(ret);
2120 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2122 struct xt_table_info *private;
2123 void *loc_cpu_entry;
2124 struct module *table_owner = table->me;
2125 struct ip6t_entry *iter;
2127 private = xt_unregister_table(table);
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2131 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2132 cleanup_entry(iter, net);
2133 if (private->number > private->initial_entries)
2134 module_put(table_owner);
2135 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = type == test_type &&
			code >= min_code && code <= max_code;

	return in_range ^ invert;
}
/* Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the configured range, honouring the invert
 * flag.  Non-first fragments never match; a truncated header hot-drops
 * the packet. */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2176 /* Called when user tries to insert an entry of this type. */
2177 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2179 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2181 /* Must specify no unknown invflags */
2182 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard target: verdict stored as an int in targinfo,
		 * no ->target function (NULL means "use the verdict"). */
		.name		= IP6T_STANDARD_TARGET,
		.targetsize	= sizeof(int),
		.family		= NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize	= sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user	= compat_standard_to_user,
#endif
	},
	{
		/* Error target: marks chain heads / policy errors. */
		.name		= IP6T_ERROR_TARGET,
		.target		= ip6t_error,
		.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
		.family		= NFPROTO_IPV6,
	},
};
/* get/setsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the handlers above (compat variants included). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
/* Built-in "icmp6" match: restricted to IPPROTO_ICMPV6 rules. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name		= "icmp6",
		.match		= icmp6_match,
		.matchsize	= sizeof(struct ip6t_icmp),
		.checkentry	= icmp6_checkentry,
		.proto		= IPPROTO_ICMPV6,
		.family		= NFPROTO_IPV6,
	},
};
/* Per-netns init: set up x_tables state (e.g. /proc entries) for IPv6. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
/* Per-netns teardown: undo ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
/* Hook the per-netns init/exit pair into the pernet framework. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
/*
 * Module init: register the pernet ops, the built-in targets and match,
 * and finally the sockopt interface.  Each failure unwinds exactly the
 * registrations performed so far (labels err5..err1, in reverse order).
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
/* Module exit: unregister in strict reverse order of ip6_tables_init()
 * so no userspace request can reach a half-torn-down subsystem. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
/*
 * find the offset to specified header or the protocol number of last header
 * if target < 0. "last header" is transport protocol header, ESP, or
 * "No next header".
 *
 * If target header is found, its offset is set in *offset and return protocol
 * number. Otherwise, return -1.
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that non-1st fragment is special case that "the protocol number
 * of last header" is "next header" field in Fragment header. In this case,
 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
 * isn't NULL.
 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	/* Walk the extension-header chain until @target (or the last
	 * header, when target < 0) is reached. */
	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* Mask off the M (more fragments) and reserved
			 * bits to get the 8-byte fragment offset. */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* Non-first fragment: report the fragment
				 * header's next-header value instead. */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;	/* fragment header is fixed-size */
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;	/* AH length units */
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
/* Symbols used by the per-table modules (ip6table_filter etc.) and by
 * other netfilter code. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);