[NETFILTER]: netns: put table module on netns stop
[linux-2.6/mini2440.git] / net/ipv6/netfilter/ip6_tables.c
blob 6fabb73ff445f1e941f3fe3b8aeb7e6bb11afb56
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __FUNCTION__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
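/* Roughly, as used below by ip6t_do_table(), alloc_counters() and
 * do_add_counters() (a sketch only; see those functions for the real thing):
 *
 *	// softirq / packet path: read-lock and walk this CPU's copy
 *	read_lock_bh(&table->lock);
 *	// ... match entries, ADD_COUNTER() on the per-CPU copy ...
 *	read_unlock_bh(&table->lock);
 *
 *	// user context: write-lock for a stable snapshot or counter update
 *	write_lock_bh(&table->lock);
 *	// ... get_counters() sums every CPU's copy into one array ...
 *	write_unlock_bh(&table->lock);
 */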
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
94 static inline bool
95 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev,
97 const char *outdev,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
102 size_t i;
103 unsigned long ret;
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
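/* Example: for a rule written "! -s 2001:db8::/32", IP6T_INV_SRCIP is set in
 * ip6info->invflags.  A packet whose source really is inside 2001:db8::/32
 * makes the masked compare below return 0 (addresses agree), and
 * FWINV(0, IP6T_INV_SRCIP) == 1, so the rule is treated as a mismatch,
 * which is exactly the inverted behaviour the user asked for. */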
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
113 /*
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
120 return false;
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
134 return false;
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 return false;
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
154 int protohdr;
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
158 if (protohdr < 0) {
159 if (_frag_off == 0)
160 *hotdrop = true;
161 return false;
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
166 protohdr,
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
168 ip6info->proto);
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
172 return false;
174 return true;
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
180 return false;
182 return true;
185 /* should be ip6 safe */
186 static bool
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
192 return false;
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
197 return false;
199 return true;
202 static unsigned int
203 ip6t_error(struct sk_buff *skb,
204 const struct net_device *in,
205 const struct net_device *out,
206 unsigned int hooknum,
207 const struct xt_target *target,
208 const void *targinfo)
210 if (net_ratelimit())
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
213 return NF_DROP;
216 /* Performance critical - called for every packet */
217 static inline bool
218 do_match(struct ip6t_entry_match *m,
219 const struct sk_buff *skb,
220 const struct net_device *in,
221 const struct net_device *out,
222 int offset,
223 unsigned int protoff,
224 bool *hotdrop)
226 /* Stop iteration if it doesn't match */
227 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
228 offset, protoff, hotdrop))
229 return true;
230 else
231 return false;
234 static inline struct ip6t_entry *
235 get_entry(void *base, unsigned int offset)
237 return (struct ip6t_entry *)(base + offset);
240 /* All zeroes == unconditional rule. */
241 /* Mildly perf critical (only if packet tracing is on) */
242 static inline int
243 unconditional(const struct ip6t_ip6 *ipv6)
245 unsigned int i;
247 for (i = 0; i < sizeof(*ipv6); i++)
248 if (((char *)ipv6)[i])
249 break;
251 return (i == sizeof(*ipv6));
254 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
255 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
256 /* This cries for unification! */
257 static const char *const hooknames[] = {
258 [NF_INET_PRE_ROUTING] = "PREROUTING",
259 [NF_INET_LOCAL_IN] = "INPUT",
260 [NF_INET_FORWARD] = "FORWARD",
261 [NF_INET_LOCAL_OUT] = "OUTPUT",
262 [NF_INET_POST_ROUTING] = "POSTROUTING",
265 enum nf_ip_trace_comments {
266 NF_IP6_TRACE_COMMENT_RULE,
267 NF_IP6_TRACE_COMMENT_RETURN,
268 NF_IP6_TRACE_COMMENT_POLICY,
271 static const char *const comments[] = {
272 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
273 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
274 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
277 static struct nf_loginfo trace_loginfo = {
278 .type = NF_LOG_TYPE_LOG,
279 .u = {
280 .log = {
281 .level = 4,
282 .logflags = NF_LOG_MASK,
287 /* Mildly perf critical (only if packet tracing is on) */
288 static inline int
289 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
290 char *hookname, char **chainname,
291 char **comment, unsigned int *rulenum)
293 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
295 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
296 /* Head of user chain: ERROR target with chainname */
297 *chainname = t->target.data;
298 (*rulenum) = 0;
299 } else if (s == e) {
300 (*rulenum)++;
302 if (s->target_offset == sizeof(struct ip6t_entry)
303 && strcmp(t->target.u.kernel.target->name,
304 IP6T_STANDARD_TARGET) == 0
305 && t->verdict < 0
306 && unconditional(&s->ipv6)) {
307 /* Tail of chains: STANDARD target (return/policy) */
308 *comment = *chainname == hookname
309 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
310 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
312 return 1;
313 } else
314 (*rulenum)++;
316 return 0;
319 static void trace_packet(struct sk_buff *skb,
320 unsigned int hook,
321 const struct net_device *in,
322 const struct net_device *out,
323 const char *tablename,
324 struct xt_table_info *private,
325 struct ip6t_entry *e)
327 void *table_base;
328 struct ip6t_entry *root;
329 char *hookname, *chainname, *comment;
330 unsigned int rulenum = 0;
332 table_base = (void *)private->entries[smp_processor_id()];
333 root = get_entry(table_base, private->hook_entry[hook]);
335 hookname = chainname = (char *)hooknames[hook];
336 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
338 IP6T_ENTRY_ITERATE(root,
339 private->size - private->hook_entry[hook],
340 get_chainname_rulenum,
341 e, hookname, &chainname, &comment, &rulenum);
343 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
344 "TRACE: %s:%s:%s:%u ",
345 tablename, chainname, comment, rulenum);
347 #endif
349 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
350 unsigned int
351 ip6t_do_table(struct sk_buff *skb,
352 unsigned int hook,
353 const struct net_device *in,
354 const struct net_device *out,
355 struct xt_table *table)
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 int offset = 0;
359 unsigned int protoff = 0;
360 bool hotdrop = false;
361 /* Initializing verdict to NF_DROP keeps gcc happy. */
362 unsigned int verdict = NF_DROP;
363 const char *indev, *outdev;
364 void *table_base;
365 struct ip6t_entry *e, *back;
366 struct xt_table_info *private;
368 /* Initialization */
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
376 * match it. */
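/* For example, "-p tcp --dport 80" can never match a non-first fragment
 * (the ports simply are not there to look at), while a rule that uses a
 * fragment-specific match will not match unfragmented packets. */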
378 read_lock_bh(&table->lock);
379 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
380 private = table->private;
381 table_base = (void *)private->entries[smp_processor_id()];
382 e = get_entry(table_base, private->hook_entry[hook]);
384 /* For return from builtin chain */
385 back = get_entry(table_base, private->underflow[hook]);
387 do {
388 IP_NF_ASSERT(e);
389 IP_NF_ASSERT(back);
390 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
391 &protoff, &offset, &hotdrop)) {
392 struct ip6t_entry_target *t;
394 if (IP6T_MATCH_ITERATE(e, do_match,
395 skb, in, out,
396 offset, protoff, &hotdrop) != 0)
397 goto no_match;
399 ADD_COUNTER(e->counters,
400 ntohs(ipv6_hdr(skb)->payload_len) +
401 sizeof(struct ipv6hdr), 1);
403 t = ip6t_get_target(e);
404 IP_NF_ASSERT(t->u.kernel.target);
406 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
407 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
408 /* The packet is traced: log it */
409 if (unlikely(skb->nf_trace))
410 trace_packet(skb, hook, in, out,
411 table->name, private, e);
412 #endif
413 /* Standard target? */
414 if (!t->u.kernel.target->target) {
415 int v;
417 v = ((struct ip6t_standard_target *)t)->verdict;
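/* Roughly, the verdict encoding used here: negative values are absolute
 * verdicts stored with a bias of one, so "-j DROP" is kept as
 * -NF_DROP - 1 == -1 and decoded back to NF_DROP just below; "-j RETURN"
 * is the special IP6T_RETURN value; and any v >= 0 is a byte offset into
 * the table to jump to. */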
418 if (v < 0) {
419 /* Pop from stack? */
420 if (v != IP6T_RETURN) {
421 verdict = (unsigned)(-v) - 1;
422 break;
424 e = back;
425 back = get_entry(table_base,
426 back->comefrom);
427 continue;
429 if (table_base + v != (void *)e + e->next_offset
430 && !(e->ipv6.flags & IP6T_F_GOTO)) {
431 /* Save old back ptr in next entry */
432 struct ip6t_entry *next
433 = (void *)e + e->next_offset;
434 next->comefrom
435 = (void *)back - table_base;
436 /* set back pointer to next entry */
437 back = next;
440 e = get_entry(table_base, v);
441 } else {
442 /* Targets which reenter must return
443 abs. verdicts */
444 #ifdef CONFIG_NETFILTER_DEBUG
445 ((struct ip6t_entry *)table_base)->comefrom
446 = 0xeeeeeeec;
447 #endif
448 verdict = t->u.kernel.target->target(skb,
449 in, out,
450 hook,
451 t->u.kernel.target,
452 t->data);
454 #ifdef CONFIG_NETFILTER_DEBUG
455 if (((struct ip6t_entry *)table_base)->comefrom
456 != 0xeeeeeeec
457 && verdict == IP6T_CONTINUE) {
458 printk("Target %s reentered!\n",
459 t->u.kernel.target->name);
460 verdict = NF_DROP;
462 ((struct ip6t_entry *)table_base)->comefrom
463 = 0x57acc001;
464 #endif
465 if (verdict == IP6T_CONTINUE)
466 e = (void *)e + e->next_offset;
467 else
468 /* Verdict */
469 break;
471 } else {
473 no_match:
474 e = (void *)e + e->next_offset;
476 } while (!hotdrop);
478 #ifdef CONFIG_NETFILTER_DEBUG
479 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
480 #endif
481 read_unlock_bh(&table->lock);
483 #ifdef DEBUG_ALLOW_ALL
484 return NF_ACCEPT;
485 #else
486 if (hotdrop)
487 return NF_DROP;
488 else return verdict;
489 #endif
492 /* Figures out from what hook each rule can be called: returns 0 if
493 there are loops. Puts hook bitmask in comefrom. */
494 static int
495 mark_source_chains(struct xt_table_info *newinfo,
496 unsigned int valid_hooks, void *entry0)
498 unsigned int hook;
500 /* No recursion; use packet counter to save back ptrs (reset
501 to 0 as we leave), and comefrom to save source hook bitmask */
502 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
503 unsigned int pos = newinfo->hook_entry[hook];
504 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
506 if (!(valid_hooks & (1 << hook)))
507 continue;
509 /* Set initial back pointer. */
510 e->counters.pcnt = pos;
512 for (;;) {
513 struct ip6t_standard_target *t
514 = (void *)ip6t_get_target(e);
515 int visited = e->comefrom & (1 << hook);
517 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
518 printk("iptables: loop hook %u pos %u %08X.\n",
519 hook, pos, e->comefrom);
520 return 0;
522 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
524 /* Unconditional return/END. */
525 if ((e->target_offset == sizeof(struct ip6t_entry)
526 && (strcmp(t->target.u.user.name,
527 IP6T_STANDARD_TARGET) == 0)
528 && t->verdict < 0
529 && unconditional(&e->ipv6)) || visited) {
530 unsigned int oldpos, size;
532 if (t->verdict < -NF_MAX_VERDICT - 1) {
533 duprintf("mark_source_chains: bad "
534 "negative verdict (%i)\n",
535 t->verdict);
536 return 0;
539 /* Return: backtrack through the last
540 big jump. */
541 do {
542 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
543 #ifdef DEBUG_IP_FIREWALL_USER
544 if (e->comefrom
545 & (1 << NF_INET_NUMHOOKS)) {
546 duprintf("Back unset "
547 "on hook %u "
548 "rule %u\n",
549 hook, pos);
551 #endif
552 oldpos = pos;
553 pos = e->counters.pcnt;
554 e->counters.pcnt = 0;
556 /* We're at the start. */
557 if (pos == oldpos)
558 goto next;
560 e = (struct ip6t_entry *)
561 (entry0 + pos);
562 } while (oldpos == pos + e->next_offset);
564 /* Move along one */
565 size = e->next_offset;
566 e = (struct ip6t_entry *)
567 (entry0 + pos + size);
568 e->counters.pcnt = pos;
569 pos += size;
570 } else {
571 int newpos = t->verdict;
573 if (strcmp(t->target.u.user.name,
574 IP6T_STANDARD_TARGET) == 0
575 && newpos >= 0) {
576 if (newpos > newinfo->size -
577 sizeof(struct ip6t_entry)) {
578 duprintf("mark_source_chains: "
579 "bad verdict (%i)\n",
580 newpos);
581 return 0;
583 /* This a jump; chase it. */
584 duprintf("Jump rule %u -> %u\n",
585 pos, newpos);
586 } else {
587 /* ... this is a fallthru */
588 newpos = pos + e->next_offset;
590 e = (struct ip6t_entry *)
591 (entry0 + newpos);
592 e->counters.pcnt = pos;
593 pos = newpos;
596 next:
597 duprintf("Finished chain %u\n", hook);
599 return 1;
602 static int
603 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
605 if (i && (*i)-- == 0)
606 return 1;
608 if (m->u.kernel.match->destroy)
609 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
610 module_put(m->u.kernel.match->me);
611 return 0;
614 static int
615 check_entry(struct ip6t_entry *e, const char *name)
617 struct ip6t_entry_target *t;
619 if (!ip6_checkentry(&e->ipv6)) {
620 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
621 return -EINVAL;
624 if (e->target_offset + sizeof(struct ip6t_entry_target) >
625 e->next_offset)
626 return -EINVAL;
628 t = ip6t_get_target(e);
629 if (e->target_offset + t->u.target_size > e->next_offset)
630 return -EINVAL;
632 return 0;
635 static int check_match(struct ip6t_entry_match *m, const char *name,
636 const struct ip6t_ip6 *ipv6,
637 unsigned int hookmask, unsigned int *i)
639 struct xt_match *match;
640 int ret;
642 match = m->u.kernel.match;
643 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
644 name, hookmask, ipv6->proto,
645 ipv6->invflags & IP6T_INV_PROTO);
646 if (!ret && m->u.kernel.match->checkentry
647 && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
648 hookmask)) {
649 duprintf("ip_tables: check failed for `%s'.\n",
650 m->u.kernel.match->name);
651 ret = -EINVAL;
653 if (!ret)
654 (*i)++;
655 return ret;
658 static int
659 find_check_match(struct ip6t_entry_match *m,
660 const char *name,
661 const struct ip6t_ip6 *ipv6,
662 unsigned int hookmask,
663 unsigned int *i)
665 struct xt_match *match;
666 int ret;
668 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
669 m->u.user.revision),
670 "ip6t_%s", m->u.user.name);
671 if (IS_ERR(match) || !match) {
672 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
673 return match ? PTR_ERR(match) : -ENOENT;
675 m->u.kernel.match = match;
677 ret = check_match(m, name, ipv6, hookmask, i);
678 if (ret)
679 goto err;
681 return 0;
682 err:
683 module_put(m->u.kernel.match->me);
684 return ret;
687 static int check_target(struct ip6t_entry *e, const char *name)
689 struct ip6t_entry_target *t;
690 struct xt_target *target;
691 int ret;
693 t = ip6t_get_target(e);
694 target = t->u.kernel.target;
695 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
696 name, e->comefrom, e->ipv6.proto,
697 e->ipv6.invflags & IP6T_INV_PROTO);
698 if (!ret && t->u.kernel.target->checkentry
699 && !t->u.kernel.target->checkentry(name, e, target, t->data,
700 e->comefrom)) {
701 duprintf("ip_tables: check failed for `%s'.\n",
702 t->u.kernel.target->name);
703 ret = -EINVAL;
705 return ret;
708 static int
709 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
710 unsigned int *i)
712 struct ip6t_entry_target *t;
713 struct xt_target *target;
714 int ret;
715 unsigned int j;
717 ret = check_entry(e, name);
718 if (ret)
719 return ret;
721 j = 0;
722 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
723 e->comefrom, &j);
724 if (ret != 0)
725 goto cleanup_matches;
727 t = ip6t_get_target(e);
728 target = try_then_request_module(xt_find_target(AF_INET6,
729 t->u.user.name,
730 t->u.user.revision),
731 "ip6t_%s", t->u.user.name);
732 if (IS_ERR(target) || !target) {
733 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
734 ret = target ? PTR_ERR(target) : -ENOENT;
735 goto cleanup_matches;
737 t->u.kernel.target = target;
739 ret = check_target(e, name);
740 if (ret)
741 goto err;
743 (*i)++;
744 return 0;
745 err:
746 module_put(t->u.kernel.target->me);
747 cleanup_matches:
748 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
749 return ret;
752 static int
753 check_entry_size_and_hooks(struct ip6t_entry *e,
754 struct xt_table_info *newinfo,
755 unsigned char *base,
756 unsigned char *limit,
757 const unsigned int *hook_entries,
758 const unsigned int *underflows,
759 unsigned int *i)
761 unsigned int h;
763 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
764 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
765 duprintf("Bad offset %p\n", e);
766 return -EINVAL;
769 if (e->next_offset
770 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
771 duprintf("checking: element %p size %u\n",
772 e, e->next_offset);
773 return -EINVAL;
776 /* Check hooks & underflows */
777 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
778 if ((unsigned char *)e - base == hook_entries[h])
779 newinfo->hook_entry[h] = hook_entries[h];
780 if ((unsigned char *)e - base == underflows[h])
781 newinfo->underflow[h] = underflows[h];
784 /* FIXME: underflows must be unconditional, standard verdicts
785 < 0 (not IP6T_RETURN). --RR */
787 /* Clear counters and comefrom */
788 e->counters = ((struct xt_counters) { 0, 0 });
789 e->comefrom = 0;
791 (*i)++;
792 return 0;
795 static int
796 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
798 struct ip6t_entry_target *t;
800 if (i && (*i)-- == 0)
801 return 1;
803 /* Cleanup all matches */
804 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
805 t = ip6t_get_target(e);
806 if (t->u.kernel.target->destroy)
807 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
808 module_put(t->u.kernel.target->me);
809 return 0;
812 /* Checks and translates the user-supplied table segment (held in
813 newinfo) */
814 static int
815 translate_table(const char *name,
816 unsigned int valid_hooks,
817 struct xt_table_info *newinfo,
818 void *entry0,
819 unsigned int size,
820 unsigned int number,
821 const unsigned int *hook_entries,
822 const unsigned int *underflows)
824 unsigned int i;
825 int ret;
827 newinfo->size = size;
828 newinfo->number = number;
830 /* Init all hooks to impossible value. */
831 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
832 newinfo->hook_entry[i] = 0xFFFFFFFF;
833 newinfo->underflow[i] = 0xFFFFFFFF;
836 duprintf("translate_table: size %u\n", newinfo->size);
837 i = 0;
838 /* Walk through entries, checking offsets. */
839 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
840 check_entry_size_and_hooks,
841 newinfo,
842 entry0,
843 entry0 + size,
844 hook_entries, underflows, &i);
845 if (ret != 0)
846 return ret;
848 if (i != number) {
849 duprintf("translate_table: %u not %u entries\n",
850 i, number);
851 return -EINVAL;
854 /* Check hooks all assigned */
855 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
856 /* Only hooks which are valid */
857 if (!(valid_hooks & (1 << i)))
858 continue;
859 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
860 duprintf("Invalid hook entry %u %u\n",
861 i, hook_entries[i]);
862 return -EINVAL;
864 if (newinfo->underflow[i] == 0xFFFFFFFF) {
865 duprintf("Invalid underflow %u %u\n",
866 i, underflows[i]);
867 return -EINVAL;
871 if (!mark_source_chains(newinfo, valid_hooks, entry0))
872 return -ELOOP;
874 /* Finally, each sanity check must pass */
875 i = 0;
876 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
877 find_check_entry, name, size, &i);
879 if (ret != 0) {
880 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
881 cleanup_entry, &i);
882 return ret;
885 /* And one copy for every other CPU */
886 for_each_possible_cpu(i) {
887 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
888 memcpy(newinfo->entries[i], entry0, newinfo->size);
891 return ret;
894 /* Gets counters. */
895 static inline int
896 add_entry_to_counter(const struct ip6t_entry *e,
897 struct xt_counters total[],
898 unsigned int *i)
900 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
902 (*i)++;
903 return 0;
906 static inline int
907 set_entry_to_counter(const struct ip6t_entry *e,
908 struct ip6t_counters total[],
909 unsigned int *i)
911 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
913 (*i)++;
914 return 0;
917 static void
918 get_counters(const struct xt_table_info *t,
919 struct xt_counters counters[])
921 unsigned int cpu;
922 unsigned int i;
923 unsigned int curcpu;
925 /* Instead of clearing (by a previous call to memset())
926 * the counters and using adds, we set the counters
927 * with data used by 'current' CPU
928 * We dont care about preemption here.
929 */
930 curcpu = raw_smp_processor_id();
932 i = 0;
933 IP6T_ENTRY_ITERATE(t->entries[curcpu],
934 t->size,
935 set_entry_to_counter,
936 counters,
937 &i);
939 for_each_possible_cpu(cpu) {
940 if (cpu == curcpu)
941 continue;
942 i = 0;
943 IP6T_ENTRY_ITERATE(t->entries[cpu],
944 t->size,
945 add_entry_to_counter,
946 counters,
947 &i);
951 static struct xt_counters *alloc_counters(struct xt_table *table)
953 unsigned int countersize;
954 struct xt_counters *counters;
955 struct xt_table_info *private = table->private;
957 /* We need atomic snapshot of counters: rest doesn't change
958 (other than comefrom, which userspace doesn't care
959 about). */
960 countersize = sizeof(struct xt_counters) * private->number;
961 counters = vmalloc_node(countersize, numa_node_id());
963 if (counters == NULL)
964 return ERR_PTR(-ENOMEM);
966 /* First, sum counters... */
967 write_lock_bh(&table->lock);
968 get_counters(private, counters);
969 write_unlock_bh(&table->lock);
971 return counters;
974 static int
975 copy_entries_to_user(unsigned int total_size,
976 struct xt_table *table,
977 void __user *userptr)
979 unsigned int off, num;
980 struct ip6t_entry *e;
981 struct xt_counters *counters;
982 struct xt_table_info *private = table->private;
983 int ret = 0;
984 void *loc_cpu_entry;
986 counters = alloc_counters(table);
987 if (IS_ERR(counters))
988 return PTR_ERR(counters);
990 /* choose the copy that is on our node/cpu, ...
991 * This choice is lazy (because current thread is
992 * allowed to migrate to another cpu)
993 */
994 loc_cpu_entry = private->entries[raw_smp_processor_id()];
995 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
996 ret = -EFAULT;
997 goto free_counters;
1000 /* FIXME: use iterator macros --RR */
1001 /* ... then go back and fix counters and names */
1002 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1003 unsigned int i;
1004 struct ip6t_entry_match *m;
1005 struct ip6t_entry_target *t;
1007 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1008 if (copy_to_user(userptr + off
1009 + offsetof(struct ip6t_entry, counters),
1010 &counters[num],
1011 sizeof(counters[num])) != 0) {
1012 ret = -EFAULT;
1013 goto free_counters;
1016 for (i = sizeof(struct ip6t_entry);
1017 i < e->target_offset;
1018 i += m->u.match_size) {
1019 m = (void *)e + i;
1021 if (copy_to_user(userptr + off + i
1022 + offsetof(struct ip6t_entry_match,
1023 u.user.name),
1024 m->u.kernel.match->name,
1025 strlen(m->u.kernel.match->name)+1)
1026 != 0) {
1027 ret = -EFAULT;
1028 goto free_counters;
1032 t = ip6t_get_target(e);
1033 if (copy_to_user(userptr + off + e->target_offset
1034 + offsetof(struct ip6t_entry_target,
1035 u.user.name),
1036 t->u.kernel.target->name,
1037 strlen(t->u.kernel.target->name)+1) != 0) {
1038 ret = -EFAULT;
1039 goto free_counters;
1043 free_counters:
1044 vfree(counters);
1045 return ret;
1048 #ifdef CONFIG_COMPAT
1049 static void compat_standard_from_user(void *dst, void *src)
1051 int v = *(compat_int_t *)src;
1053 if (v > 0)
1054 v += xt_compat_calc_jump(AF_INET6, v);
1055 memcpy(dst, &v, sizeof(v));
1058 static int compat_standard_to_user(void __user *dst, void *src)
1060 compat_int_t cv = *(int *)src;
1062 if (cv > 0)
1063 cv -= xt_compat_calc_jump(AF_INET6, cv);
1064 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1067 static inline int
1068 compat_calc_match(struct ip6t_entry_match *m, int *size)
1070 *size += xt_compat_match_offset(m->u.kernel.match);
1071 return 0;
1074 static int compat_calc_entry(struct ip6t_entry *e,
1075 const struct xt_table_info *info,
1076 void *base, struct xt_table_info *newinfo)
1078 struct ip6t_entry_target *t;
1079 unsigned int entry_offset;
1080 int off, i, ret;
1082 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1083 entry_offset = (void *)e - base;
1084 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1085 t = ip6t_get_target(e);
1086 off += xt_compat_target_offset(t->u.kernel.target);
1087 newinfo->size -= off;
1088 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1089 if (ret)
1090 return ret;
1092 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1093 if (info->hook_entry[i] &&
1094 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1095 newinfo->hook_entry[i] -= off;
1096 if (info->underflow[i] &&
1097 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1098 newinfo->underflow[i] -= off;
1100 return 0;
1103 static int compat_table_info(const struct xt_table_info *info,
1104 struct xt_table_info *newinfo)
1106 void *loc_cpu_entry;
1108 if (!newinfo || !info)
1109 return -EINVAL;
1111 /* we dont care about newinfo->entries[] */
1112 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1113 newinfo->initial_entries = 0;
1114 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1115 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1116 compat_calc_entry, info, loc_cpu_entry,
1117 newinfo);
1119 #endif
1121 static int get_info(struct net *net, void __user *user, int *len, int compat)
1123 char name[IP6T_TABLE_MAXNAMELEN];
1124 struct xt_table *t;
1125 int ret;
1127 if (*len != sizeof(struct ip6t_getinfo)) {
1128 duprintf("length %u != %zu\n", *len,
1129 sizeof(struct ip6t_getinfo));
1130 return -EINVAL;
1133 if (copy_from_user(name, user, sizeof(name)) != 0)
1134 return -EFAULT;
1136 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1137 #ifdef CONFIG_COMPAT
1138 if (compat)
1139 xt_compat_lock(AF_INET6);
1140 #endif
1141 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1142 "ip6table_%s", name);
1143 if (t && !IS_ERR(t)) {
1144 struct ip6t_getinfo info;
1145 struct xt_table_info *private = t->private;
1147 #ifdef CONFIG_COMPAT
1148 if (compat) {
1149 struct xt_table_info tmp;
1150 ret = compat_table_info(private, &tmp);
1151 xt_compat_flush_offsets(AF_INET6);
1152 private = &tmp;
1154 #endif
1155 info.valid_hooks = t->valid_hooks;
1156 memcpy(info.hook_entry, private->hook_entry,
1157 sizeof(info.hook_entry));
1158 memcpy(info.underflow, private->underflow,
1159 sizeof(info.underflow));
1160 info.num_entries = private->number;
1161 info.size = private->size;
1162 strcpy(info.name, name);
1164 if (copy_to_user(user, &info, *len) != 0)
1165 ret = -EFAULT;
1166 else
1167 ret = 0;
1169 xt_table_unlock(t);
1170 module_put(t->me);
1171 } else
1172 ret = t ? PTR_ERR(t) : -ENOENT;
1173 #ifdef CONFIG_COMPAT
1174 if (compat)
1175 xt_compat_unlock(AF_INET6);
1176 #endif
1177 return ret;
1180 static int
1181 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1183 int ret;
1184 struct ip6t_get_entries get;
1185 struct xt_table *t;
1187 if (*len < sizeof(get)) {
1188 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1189 return -EINVAL;
1191 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1192 return -EFAULT;
1193 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1194 duprintf("get_entries: %u != %zu\n",
1195 *len, sizeof(get) + get.size);
1196 return -EINVAL;
1199 t = xt_find_table_lock(net, AF_INET6, get.name);
1200 if (t && !IS_ERR(t)) {
1201 struct xt_table_info *private = t->private;
1202 duprintf("t->private->number = %u\n", private->number);
1203 if (get.size == private->size)
1204 ret = copy_entries_to_user(private->size,
1205 t, uptr->entrytable);
1206 else {
1207 duprintf("get_entries: I've got %u not %u!\n",
1208 private->size, get.size);
1209 ret = -EINVAL;
1211 module_put(t->me);
1212 xt_table_unlock(t);
1213 } else
1214 ret = t ? PTR_ERR(t) : -ENOENT;
1216 return ret;
1219 static int
1220 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1221 struct xt_table_info *newinfo, unsigned int num_counters,
1222 void __user *counters_ptr)
1224 int ret;
1225 struct xt_table *t;
1226 struct xt_table_info *oldinfo;
1227 struct xt_counters *counters;
1228 void *loc_cpu_old_entry;
1230 ret = 0;
1231 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1232 numa_node_id());
1233 if (!counters) {
1234 ret = -ENOMEM;
1235 goto out;
1238 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1239 "ip6table_%s", name);
1240 if (!t || IS_ERR(t)) {
1241 ret = t ? PTR_ERR(t) : -ENOENT;
1242 goto free_newinfo_counters_untrans;
1245 /* You lied! */
1246 if (valid_hooks != t->valid_hooks) {
1247 duprintf("Valid hook crap: %08X vs %08X\n",
1248 valid_hooks, t->valid_hooks);
1249 ret = -EINVAL;
1250 goto put_module;
1253 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1254 if (!oldinfo)
1255 goto put_module;
1257 /* Update module usage count based on number of rules */
1258 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1259 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1260 if ((oldinfo->number > oldinfo->initial_entries) ||
1261 (newinfo->number <= oldinfo->initial_entries))
1262 module_put(t->me);
1263 if ((oldinfo->number > oldinfo->initial_entries) &&
1264 (newinfo->number <= oldinfo->initial_entries))
1265 module_put(t->me);
1267 /* Get the old counters. */
1268 get_counters(oldinfo, counters);
1269 /* Decrease module usage counts and free resource */
1270 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1271 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1272 NULL);
1273 xt_free_table_info(oldinfo);
1274 if (copy_to_user(counters_ptr, counters,
1275 sizeof(struct xt_counters) * num_counters) != 0)
1276 ret = -EFAULT;
1277 vfree(counters);
1278 xt_table_unlock(t);
1279 return ret;
1281 put_module:
1282 module_put(t->me);
1283 xt_table_unlock(t);
1284 free_newinfo_counters_untrans:
1285 vfree(counters);
1286 out:
1287 return ret;
1290 static int
1291 do_replace(struct net *net, void __user *user, unsigned int len)
1293 int ret;
1294 struct ip6t_replace tmp;
1295 struct xt_table_info *newinfo;
1296 void *loc_cpu_entry;
1298 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1299 return -EFAULT;
1301 /* overflow check */
1302 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1303 return -ENOMEM;
1305 newinfo = xt_alloc_table_info(tmp.size);
1306 if (!newinfo)
1307 return -ENOMEM;
1309 /* choose the copy that is on our node/cpu */
1310 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1311 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1312 tmp.size) != 0) {
1313 ret = -EFAULT;
1314 goto free_newinfo;
1317 ret = translate_table(tmp.name, tmp.valid_hooks,
1318 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1319 tmp.hook_entry, tmp.underflow);
1320 if (ret != 0)
1321 goto free_newinfo;
1323 duprintf("ip_tables: Translated table\n");
1325 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1326 tmp.num_counters, tmp.counters);
1327 if (ret)
1328 goto free_newinfo_untrans;
1329 return 0;
1331 free_newinfo_untrans:
1332 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1333 free_newinfo:
1334 xt_free_table_info(newinfo);
1335 return ret;
1338 /* We're lazy, and add to the first CPU; overflow works its fey magic
1339 * and everything is OK. */
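/* e.g. if this CPU's copy of a rule has counted 10 pkts/1000 bytes and
 * another CPU's copy 5/500, adding 3/300 here touches only the chosen copy,
 * yet a later get_counters() snapshot still sums 18/1800 over all copies,
 * so the totals reported to userspace come out right. */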
1340 static inline int
1341 add_counter_to_entry(struct ip6t_entry *e,
1342 const struct xt_counters addme[],
1343 unsigned int *i)
1345 #if 0
1346 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1348 (long unsigned int)e->counters.pcnt,
1349 (long unsigned int)e->counters.bcnt,
1350 (long unsigned int)addme[*i].pcnt,
1351 (long unsigned int)addme[*i].bcnt);
1352 #endif
1354 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1356 (*i)++;
1357 return 0;
1360 static int
1361 do_add_counters(struct net *net, void __user *user, unsigned int len,
1362 int compat)
1364 unsigned int i;
1365 struct xt_counters_info tmp;
1366 struct xt_counters *paddc;
1367 unsigned int num_counters;
1368 char *name;
1369 int size;
1370 void *ptmp;
1371 struct xt_table *t;
1372 struct xt_table_info *private;
1373 int ret = 0;
1374 void *loc_cpu_entry;
1375 #ifdef CONFIG_COMPAT
1376 struct compat_xt_counters_info compat_tmp;
1378 if (compat) {
1379 ptmp = &compat_tmp;
1380 size = sizeof(struct compat_xt_counters_info);
1381 } else
1382 #endif
1384 ptmp = &tmp;
1385 size = sizeof(struct xt_counters_info);
1388 if (copy_from_user(ptmp, user, size) != 0)
1389 return -EFAULT;
1391 #ifdef CONFIG_COMPAT
1392 if (compat) {
1393 num_counters = compat_tmp.num_counters;
1394 name = compat_tmp.name;
1395 } else
1396 #endif
1398 num_counters = tmp.num_counters;
1399 name = tmp.name;
1402 if (len != size + num_counters * sizeof(struct xt_counters))
1403 return -EINVAL;
1405 paddc = vmalloc_node(len - size, numa_node_id());
1406 if (!paddc)
1407 return -ENOMEM;
1409 if (copy_from_user(paddc, user + size, len - size) != 0) {
1410 ret = -EFAULT;
1411 goto free;
1414 t = xt_find_table_lock(net, AF_INET6, name);
1415 if (!t || IS_ERR(t)) {
1416 ret = t ? PTR_ERR(t) : -ENOENT;
1417 goto free;
1420 write_lock_bh(&t->lock);
1421 private = t->private;
1422 if (private->number != num_counters) {
1423 ret = -EINVAL;
1424 goto unlock_up_free;
1427 i = 0;
1428 /* Choose the copy that is on our node */
1429 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1430 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1431 private->size,
1432 add_counter_to_entry,
1433 paddc,
1434 &i);
1435 unlock_up_free:
1436 write_unlock_bh(&t->lock);
1437 xt_table_unlock(t);
1438 module_put(t->me);
1439 free:
1440 vfree(paddc);
1442 return ret;
1445 #ifdef CONFIG_COMPAT
1446 struct compat_ip6t_replace {
1447 char name[IP6T_TABLE_MAXNAMELEN];
1448 u32 valid_hooks;
1449 u32 num_entries;
1450 u32 size;
1451 u32 hook_entry[NF_INET_NUMHOOKS];
1452 u32 underflow[NF_INET_NUMHOOKS];
1453 u32 num_counters;
1454 compat_uptr_t counters; /* struct ip6t_counters * */
1455 struct compat_ip6t_entry entries[0];
1458 static int
1459 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1460 compat_uint_t *size, struct xt_counters *counters,
1461 unsigned int *i)
1463 struct ip6t_entry_target *t;
1464 struct compat_ip6t_entry __user *ce;
1465 u_int16_t target_offset, next_offset;
1466 compat_uint_t origsize;
1467 int ret;
1469 ret = -EFAULT;
1470 origsize = *size;
1471 ce = (struct compat_ip6t_entry __user *)*dstptr;
1472 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1473 goto out;
1475 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1476 goto out;
1478 *dstptr += sizeof(struct compat_ip6t_entry);
1479 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1481 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1482 target_offset = e->target_offset - (origsize - *size);
1483 if (ret)
1484 goto out;
1485 t = ip6t_get_target(e);
1486 ret = xt_compat_target_to_user(t, dstptr, size);
1487 if (ret)
1488 goto out;
1489 ret = -EFAULT;
1490 next_offset = e->next_offset - (origsize - *size);
1491 if (put_user(target_offset, &ce->target_offset))
1492 goto out;
1493 if (put_user(next_offset, &ce->next_offset))
1494 goto out;
1496 (*i)++;
1497 return 0;
1498 out:
1499 return ret;
1502 static int
1503 compat_find_calc_match(struct ip6t_entry_match *m,
1504 const char *name,
1505 const struct ip6t_ip6 *ipv6,
1506 unsigned int hookmask,
1507 int *size, int *i)
1509 struct xt_match *match;
1511 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1512 m->u.user.revision),
1513 "ip6t_%s", m->u.user.name);
1514 if (IS_ERR(match) || !match) {
1515 duprintf("compat_check_calc_match: `%s' not found\n",
1516 m->u.user.name);
1517 return match ? PTR_ERR(match) : -ENOENT;
1519 m->u.kernel.match = match;
1520 *size += xt_compat_match_offset(match);
1522 (*i)++;
1523 return 0;
1526 static int
1527 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1529 if (i && (*i)-- == 0)
1530 return 1;
1532 module_put(m->u.kernel.match->me);
1533 return 0;
1536 static int
1537 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1539 struct ip6t_entry_target *t;
1541 if (i && (*i)-- == 0)
1542 return 1;
1544 /* Cleanup all matches */
1545 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1546 t = compat_ip6t_get_target(e);
1547 module_put(t->u.kernel.target->me);
1548 return 0;
1551 static int
1552 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1553 struct xt_table_info *newinfo,
1554 unsigned int *size,
1555 unsigned char *base,
1556 unsigned char *limit,
1557 unsigned int *hook_entries,
1558 unsigned int *underflows,
1559 unsigned int *i,
1560 const char *name)
1562 struct ip6t_entry_target *t;
1563 struct xt_target *target;
1564 unsigned int entry_offset;
1565 int ret, off, h, j;
1567 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1568 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1569 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1570 duprintf("Bad offset %p, limit = %p\n", e, limit);
1571 return -EINVAL;
1574 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1575 sizeof(struct compat_xt_entry_target)) {
1576 duprintf("checking: element %p size %u\n",
1577 e, e->next_offset);
1578 return -EINVAL;
1581 /* For purposes of check_entry casting the compat entry is fine */
1582 ret = check_entry((struct ip6t_entry *)e, name);
1583 if (ret)
1584 return ret;
1586 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1587 entry_offset = (void *)e - (void *)base;
1588 j = 0;
1589 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1590 &e->ipv6, e->comefrom, &off, &j);
1591 if (ret != 0)
1592 goto release_matches;
1594 t = compat_ip6t_get_target(e);
1595 target = try_then_request_module(xt_find_target(AF_INET6,
1596 t->u.user.name,
1597 t->u.user.revision),
1598 "ip6t_%s", t->u.user.name);
1599 if (IS_ERR(target) || !target) {
1600 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1601 t->u.user.name);
1602 ret = target ? PTR_ERR(target) : -ENOENT;
1603 goto release_matches;
1605 t->u.kernel.target = target;
1607 off += xt_compat_target_offset(target);
1608 *size += off;
1609 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1610 if (ret)
1611 goto out;
1613 /* Check hooks & underflows */
1614 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1615 if ((unsigned char *)e - base == hook_entries[h])
1616 newinfo->hook_entry[h] = hook_entries[h];
1617 if ((unsigned char *)e - base == underflows[h])
1618 newinfo->underflow[h] = underflows[h];
1621 /* Clear counters and comefrom */
1622 memset(&e->counters, 0, sizeof(e->counters));
1623 e->comefrom = 0;
1625 (*i)++;
1626 return 0;
1628 out:
1629 module_put(t->u.kernel.target->me);
1630 release_matches:
1631 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1632 return ret;
1635 static int
1636 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1637 unsigned int *size, const char *name,
1638 struct xt_table_info *newinfo, unsigned char *base)
1640 struct ip6t_entry_target *t;
1641 struct xt_target *target;
1642 struct ip6t_entry *de;
1643 unsigned int origsize;
1644 int ret, h;
1646 ret = 0;
1647 origsize = *size;
1648 de = (struct ip6t_entry *)*dstptr;
1649 memcpy(de, e, sizeof(struct ip6t_entry));
1650 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1652 *dstptr += sizeof(struct ip6t_entry);
1653 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1655 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1656 dstptr, size);
1657 if (ret)
1658 return ret;
1659 de->target_offset = e->target_offset - (origsize - *size);
1660 t = compat_ip6t_get_target(e);
1661 target = t->u.kernel.target;
1662 xt_compat_target_from_user(t, dstptr, size);
1664 de->next_offset = e->next_offset - (origsize - *size);
1665 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1666 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1667 newinfo->hook_entry[h] -= origsize - *size;
1668 if ((unsigned char *)de - base < newinfo->underflow[h])
1669 newinfo->underflow[h] -= origsize - *size;
1671 return ret;
1674 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1675 unsigned int *i)
1677 int j, ret;
1679 j = 0;
1680 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
1681 e->comefrom, &j);
1682 if (ret)
1683 goto cleanup_matches;
1685 ret = check_target(e, name);
1686 if (ret)
1687 goto cleanup_matches;
1689 (*i)++;
1690 return 0;
1692 cleanup_matches:
1693 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
1694 return ret;
1697 static int
1698 translate_compat_table(const char *name,
1699 unsigned int valid_hooks,
1700 struct xt_table_info **pinfo,
1701 void **pentry0,
1702 unsigned int total_size,
1703 unsigned int number,
1704 unsigned int *hook_entries,
1705 unsigned int *underflows)
1707 unsigned int i, j;
1708 struct xt_table_info *newinfo, *info;
1709 void *pos, *entry0, *entry1;
1710 unsigned int size;
1711 int ret;
1713 info = *pinfo;
1714 entry0 = *pentry0;
1715 size = total_size;
1716 info->number = number;
1718 /* Init all hooks to impossible value. */
1719 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1720 info->hook_entry[i] = 0xFFFFFFFF;
1721 info->underflow[i] = 0xFFFFFFFF;
1724 duprintf("translate_compat_table: size %u\n", info->size);
1725 j = 0;
1726 xt_compat_lock(AF_INET6);
1727 /* Walk through entries, checking offsets. */
1728 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1729 check_compat_entry_size_and_hooks,
1730 info, &size, entry0,
1731 entry0 + total_size,
1732 hook_entries, underflows, &j, name);
1733 if (ret != 0)
1734 goto out_unlock;
1736 ret = -EINVAL;
1737 if (j != number) {
1738 duprintf("translate_compat_table: %u not %u entries\n",
1739 j, number);
1740 goto out_unlock;
1743 /* Check hooks all assigned */
1744 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1745 /* Only hooks which are valid */
1746 if (!(valid_hooks & (1 << i)))
1747 continue;
1748 if (info->hook_entry[i] == 0xFFFFFFFF) {
1749 duprintf("Invalid hook entry %u %u\n",
1750 i, hook_entries[i]);
1751 goto out_unlock;
1753 if (info->underflow[i] == 0xFFFFFFFF) {
1754 duprintf("Invalid underflow %u %u\n",
1755 i, underflows[i]);
1756 goto out_unlock;
1760 ret = -ENOMEM;
1761 newinfo = xt_alloc_table_info(size);
1762 if (!newinfo)
1763 goto out_unlock;
1765 newinfo->number = number;
1766 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1767 newinfo->hook_entry[i] = info->hook_entry[i];
1768 newinfo->underflow[i] = info->underflow[i];
1770 entry1 = newinfo->entries[raw_smp_processor_id()];
1771 pos = entry1;
1772 size = total_size;
1773 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1774 compat_copy_entry_from_user,
1775 &pos, &size, name, newinfo, entry1);
1776 xt_compat_flush_offsets(AF_INET6);
1777 xt_compat_unlock(AF_INET6);
1778 if (ret)
1779 goto free_newinfo;
1781 ret = -ELOOP;
1782 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1783 goto free_newinfo;
1785 i = 0;
1786 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1787 name, &i);
1788 if (ret) {
1789 j -= i;
1790 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1791 compat_release_entry, &j);
1792 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1793 xt_free_table_info(newinfo);
1794 return ret;
1797 /* And one copy for every other CPU */
1798 for_each_possible_cpu(i)
1799 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1800 memcpy(newinfo->entries[i], entry1, newinfo->size);
1802 *pinfo = newinfo;
1803 *pentry0 = entry1;
1804 xt_free_table_info(info);
1805 return 0;
1807 free_newinfo:
1808 xt_free_table_info(newinfo);
1809 out:
1810 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1811 return ret;
1812 out_unlock:
1813 xt_compat_flush_offsets(AF_INET6);
1814 xt_compat_unlock(AF_INET6);
1815 goto out;
1818 static int
1819 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1821 int ret;
1822 struct compat_ip6t_replace tmp;
1823 struct xt_table_info *newinfo;
1824 void *loc_cpu_entry;
1826 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1827 return -EFAULT;
1829 /* overflow check */
1830 if (tmp.size >= INT_MAX / num_possible_cpus())
1831 return -ENOMEM;
1832 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1833 return -ENOMEM;
1835 newinfo = xt_alloc_table_info(tmp.size);
1836 if (!newinfo)
1837 return -ENOMEM;
1839 /* choose the copy that is on our node/cpu */
1840 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1841 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1842 tmp.size) != 0) {
1843 ret = -EFAULT;
1844 goto free_newinfo;
1847 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1848 &newinfo, &loc_cpu_entry, tmp.size,
1849 tmp.num_entries, tmp.hook_entry,
1850 tmp.underflow);
1851 if (ret != 0)
1852 goto free_newinfo;
1854 duprintf("compat_do_replace: Translated table\n");
1856 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1857 tmp.num_counters, compat_ptr(tmp.counters));
1858 if (ret)
1859 goto free_newinfo_untrans;
1860 return 0;
1862 free_newinfo_untrans:
1863 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1864 free_newinfo:
1865 xt_free_table_info(newinfo);
1866 return ret;
1869 static int
1870 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1871 unsigned int len)
1873 int ret;
1875 if (!capable(CAP_NET_ADMIN))
1876 return -EPERM;
1878 switch (cmd) {
1879 case IP6T_SO_SET_REPLACE:
1880 ret = compat_do_replace(sk->sk_net, user, len);
1881 break;
1883 case IP6T_SO_SET_ADD_COUNTERS:
1884 ret = do_add_counters(sk->sk_net, user, len, 1);
1885 break;
1887 default:
1888 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1889 ret = -EINVAL;
1892 return ret;
1895 struct compat_ip6t_get_entries {
1896 char name[IP6T_TABLE_MAXNAMELEN];
1897 compat_uint_t size;
1898 struct compat_ip6t_entry entrytable[0];
1901 static int
1902 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1903 void __user *userptr)
1905 struct xt_counters *counters;
1906 struct xt_table_info *private = table->private;
1907 void __user *pos;
1908 unsigned int size;
1909 int ret = 0;
1910 void *loc_cpu_entry;
1911 unsigned int i = 0;
1913 counters = alloc_counters(table);
1914 if (IS_ERR(counters))
1915 return PTR_ERR(counters);
1917 /* choose the copy that is on our node/cpu, ...
1918 * This choice is lazy (because current thread is
1919 * allowed to migrate to another cpu)
1920 */
1921 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1922 pos = userptr;
1923 size = total_size;
1924 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1925 compat_copy_entry_to_user,
1926 &pos, &size, counters, &i);
1928 vfree(counters);
1929 return ret;
1932 static int
1933 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1934 int *len)
1936 int ret;
1937 struct compat_ip6t_get_entries get;
1938 struct xt_table *t;
1940 if (*len < sizeof(get)) {
1941 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1942 return -EINVAL;
1945 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1946 return -EFAULT;
1948 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1949 duprintf("compat_get_entries: %u != %zu\n",
1950 *len, sizeof(get) + get.size);
1951 return -EINVAL;
1954 xt_compat_lock(AF_INET6);
1955 t = xt_find_table_lock(net, AF_INET6, get.name);
1956 if (t && !IS_ERR(t)) {
1957 struct xt_table_info *private = t->private;
1958 struct xt_table_info info;
1959 duprintf("t->private->number = %u\n", private->number);
1960 ret = compat_table_info(private, &info);
1961 if (!ret && get.size == info.size) {
1962 ret = compat_copy_entries_to_user(private->size,
1963 t, uptr->entrytable);
1964 } else if (!ret) {
1965 duprintf("compat_get_entries: I've got %u not %u!\n",
1966 private->size, get.size);
1967 ret = -EINVAL;
1969 xt_compat_flush_offsets(AF_INET6);
1970 module_put(t->me);
1971 xt_table_unlock(t);
1972 } else
1973 ret = t ? PTR_ERR(t) : -ENOENT;
1975 xt_compat_unlock(AF_INET6);
1976 return ret;
1979 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1981 static int
1982 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1984 int ret;
1986 if (!capable(CAP_NET_ADMIN))
1987 return -EPERM;
1989 switch (cmd) {
1990 case IP6T_SO_GET_INFO:
1991 ret = get_info(sk->sk_net, user, len, 1);
1992 break;
1993 case IP6T_SO_GET_ENTRIES:
1994 ret = compat_get_entries(sk->sk_net, user, len);
1995 break;
1996 default:
1997 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1999 return ret;
2001 #endif
2003 static int
2004 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2006 int ret;
2008 if (!capable(CAP_NET_ADMIN))
2009 return -EPERM;
2011 switch (cmd) {
2012 case IP6T_SO_SET_REPLACE:
2013 ret = do_replace(sk->sk_net, user, len);
2014 break;
2016 case IP6T_SO_SET_ADD_COUNTERS:
2017 ret = do_add_counters(sk->sk_net, user, len, 0);
2018 break;
2020 default:
2021 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2022 ret = -EINVAL;
2025 return ret;
2028 static int
2029 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2031 int ret;
2033 if (!capable(CAP_NET_ADMIN))
2034 return -EPERM;
2036 switch (cmd) {
2037 case IP6T_SO_GET_INFO:
2038 ret = get_info(sk->sk_net, user, len, 0);
2039 break;
2041 case IP6T_SO_GET_ENTRIES:
2042 ret = get_entries(sk->sk_net, user, len);
2043 break;
2045 case IP6T_SO_GET_REVISION_MATCH:
2046 case IP6T_SO_GET_REVISION_TARGET: {
2047 struct ip6t_get_revision rev;
2048 int target;
2050 if (*len != sizeof(rev)) {
2051 ret = -EINVAL;
2052 break;
2054 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2055 ret = -EFAULT;
2056 break;
2059 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2060 target = 1;
2061 else
2062 target = 0;
2064 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2065 rev.revision,
2066 target, &ret),
2067 "ip6t_%s", rev.name);
2068 break;
2071 default:
2072 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2073 ret = -EINVAL;
2076 return ret;
2079 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2080 const struct ip6t_replace *repl)
2082 int ret;
2083 struct xt_table_info *newinfo;
2084 struct xt_table_info bootstrap
2085 = { 0, 0, 0, { 0 }, { 0 }, { } };
2086 void *loc_cpu_entry;
2087 struct xt_table *new_table;
2089 newinfo = xt_alloc_table_info(repl->size);
2090 if (!newinfo) {
2091 ret = -ENOMEM;
2092 goto out;
2095 /* choose the copy on our node/cpu, but dont care about preemption */
2096 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2097 memcpy(loc_cpu_entry, repl->entries, repl->size);
2099 ret = translate_table(table->name, table->valid_hooks,
2100 newinfo, loc_cpu_entry, repl->size,
2101 repl->num_entries,
2102 repl->hook_entry,
2103 repl->underflow);
2104 if (ret != 0)
2105 goto out_free;
2107 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2108 if (IS_ERR(new_table)) {
2109 ret = PTR_ERR(new_table);
2110 goto out_free;
2112 return new_table;
2114 out_free:
2115 xt_free_table_info(newinfo);
2116 out:
2117 return ERR_PTR(ret);
2120 void ip6t_unregister_table(struct xt_table *table)
2122 struct xt_table_info *private;
2123 void *loc_cpu_entry;
2124 struct module *table_owner = table->me;
2126 private = xt_unregister_table(table);
2128 /* Decrease module usage counts and free resources */
2129 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2130 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2131 if (private->number > private->initial_entries)
2132 module_put(table_owner);
2133 xt_free_table_info(private);
2136 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2137 static inline bool
2138 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2139 u_int8_t type, u_int8_t code,
2140 bool invert)
2142 return (type == test_type && code >= min_code && code <= max_code)
2143 ^ invert;
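/* For example, ip6tables' "--icmpv6-type echo-request" becomes
 * test_type == 128 with a code range of 0..255, so every ICMPv6 echo
 * request matches; "--icmpv6-type 1/3" demands type 1, code 3 exactly;
 * prefixing "!" sets invert and flips the result. */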
2146 static bool
2147 icmp6_match(const struct sk_buff *skb,
2148 const struct net_device *in,
2149 const struct net_device *out,
2150 const struct xt_match *match,
2151 const void *matchinfo,
2152 int offset,
2153 unsigned int protoff,
2154 bool *hotdrop)
2156 struct icmp6hdr _icmph, *ic;
2157 const struct ip6t_icmp *icmpinfo = matchinfo;
2159 /* Must not be a fragment. */
2160 if (offset)
2161 return false;
2163 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2164 if (ic == NULL) {
2165 /* We've been asked to examine this packet, and we
2166 * can't. Hence, no choice but to drop.
2167 */
2168 duprintf("Dropping evil ICMP tinygram.\n");
2169 *hotdrop = true;
2170 return false;
2173 return icmp6_type_code_match(icmpinfo->type,
2174 icmpinfo->code[0],
2175 icmpinfo->code[1],
2176 ic->icmp6_type, ic->icmp6_code,
2177 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2180 /* Called when user tries to insert an entry of this type. */
2181 static bool
2182 icmp6_checkentry(const char *tablename,
2183 const void *entry,
2184 const struct xt_match *match,
2185 void *matchinfo,
2186 unsigned int hook_mask)
2188 const struct ip6t_icmp *icmpinfo = matchinfo;
2190 /* Must specify no unknown invflags */
2191 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2194 /* The built-in targets: standard (NULL) and error. */
2195 static struct xt_target ip6t_standard_target __read_mostly = {
2196 .name = IP6T_STANDARD_TARGET,
2197 .targetsize = sizeof(int),
2198 .family = AF_INET6,
2199 #ifdef CONFIG_COMPAT
2200 .compatsize = sizeof(compat_int_t),
2201 .compat_from_user = compat_standard_from_user,
2202 .compat_to_user = compat_standard_to_user,
2203 #endif
2206 static struct xt_target ip6t_error_target __read_mostly = {
2207 .name = IP6T_ERROR_TARGET,
2208 .target = ip6t_error,
2209 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2210 .family = AF_INET6,
2213 static struct nf_sockopt_ops ip6t_sockopts = {
2214 .pf = PF_INET6,
2215 .set_optmin = IP6T_BASE_CTL,
2216 .set_optmax = IP6T_SO_SET_MAX+1,
2217 .set = do_ip6t_set_ctl,
2218 #ifdef CONFIG_COMPAT
2219 .compat_set = compat_do_ip6t_set_ctl,
2220 #endif
2221 .get_optmin = IP6T_BASE_CTL,
2222 .get_optmax = IP6T_SO_GET_MAX+1,
2223 .get = do_ip6t_get_ctl,
2224 #ifdef CONFIG_COMPAT
2225 .compat_get = compat_do_ip6t_get_ctl,
2226 #endif
2227 .owner = THIS_MODULE,
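/* Userspace (ip6tables/libip6tc) reaches these handlers via
 * {get,set}sockopt() on an IPv6 socket; CAP_NET_ADMIN is checked above.
 * A rough sketch of the IP6T_SO_GET_INFO query, error handling omitted:
 *
 *	struct ip6t_getinfo info;
 *	socklen_t len = sizeof(info);
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
 *
 *	memset(&info, 0, sizeof(info));
 *	strcpy(info.name, "filter");
 *	getsockopt(s, IPPROTO_IPV6, IP6T_SO_GET_INFO, &info, &len);
 *	// on success, info.num_entries / info.size / info.valid_hooks
 *	// describe the kernel's "filter" table
 */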
2230 static struct xt_match icmp6_matchstruct __read_mostly = {
2231 .name = "icmp6",
2232 .match = icmp6_match,
2233 .matchsize = sizeof(struct ip6t_icmp),
2234 .checkentry = icmp6_checkentry,
2235 .proto = IPPROTO_ICMPV6,
2236 .family = AF_INET6,
2239 static int __init ip6_tables_init(void)
2241 int ret;
2243 ret = xt_proto_init(AF_INET6);
2244 if (ret < 0)
2245 goto err1;
2247 /* Noone else will be downing sem now, so we won't sleep */
2248 ret = xt_register_target(&ip6t_standard_target);
2249 if (ret < 0)
2250 goto err2;
2251 ret = xt_register_target(&ip6t_error_target);
2252 if (ret < 0)
2253 goto err3;
2254 ret = xt_register_match(&icmp6_matchstruct);
2255 if (ret < 0)
2256 goto err4;
2258 /* Register setsockopt */
2259 ret = nf_register_sockopt(&ip6t_sockopts);
2260 if (ret < 0)
2261 goto err5;
2263 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2264 return 0;
2266 err5:
2267 xt_unregister_match(&icmp6_matchstruct);
2268 err4:
2269 xt_unregister_target(&ip6t_error_target);
2270 err3:
2271 xt_unregister_target(&ip6t_standard_target);
2272 err2:
2273 xt_proto_fini(AF_INET6);
2274 err1:
2275 return ret;
2278 static void __exit ip6_tables_fini(void)
2280 nf_unregister_sockopt(&ip6t_sockopts);
2282 xt_unregister_match(&icmp6_matchstruct);
2283 xt_unregister_target(&ip6t_error_target);
2284 xt_unregister_target(&ip6t_standard_target);
2285 xt_proto_fini(AF_INET6);
2288 /*
2289 * find the offset to specified header or the protocol number of last header
2290 * if target < 0. "last header" is transport protocol header, ESP, or
2291 * "No next header".
2293 * If target header is found, its offset is set in *offset and return protocol
2294 * number. Otherwise, return -1.
2296 * If the first fragment doesn't contain the final protocol header or
2297 * NEXTHDR_NONE it is considered invalid.
2299 * Note that non-1st fragment is special case that "the protocol number
2300 * of last header" is "next header" field in Fragment header. In this case,
2301 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2302 * isn't NULL.
2303 *
2304 */
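/* Example use: to locate the TCP header, if the packet carries one,
 *
 *	unsigned int thoff;
 *	int nexthdr = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
 *
 *	if (nexthdr == IPPROTO_TCP)
 *		th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
 *
 * while ip6_packet_match() above passes target == -1 (plus a fragoff
 * pointer) just to learn the protocol number of the last header.
 * (th/_th stand for a local struct tcphdr pointer and buffer.)
 */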
2305 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2306 int target, unsigned short *fragoff)
2308 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2309 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2310 unsigned int len = skb->len - start;
2312 if (fragoff)
2313 *fragoff = 0;
2315 while (nexthdr != target) {
2316 struct ipv6_opt_hdr _hdr, *hp;
2317 unsigned int hdrlen;
2319 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2320 if (target < 0)
2321 break;
2322 return -ENOENT;
2325 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2326 if (hp == NULL)
2327 return -EBADMSG;
2328 if (nexthdr == NEXTHDR_FRAGMENT) {
2329 unsigned short _frag_off;
2330 __be16 *fp;
2331 fp = skb_header_pointer(skb,
2332 start+offsetof(struct frag_hdr,
2333 frag_off),
2334 sizeof(_frag_off),
2335 &_frag_off);
2336 if (fp == NULL)
2337 return -EBADMSG;
2339 _frag_off = ntohs(*fp) & ~0x7;
2340 if (_frag_off) {
2341 if (target < 0 &&
2342 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2343 hp->nexthdr == NEXTHDR_NONE)) {
2344 if (fragoff)
2345 *fragoff = _frag_off;
2346 return hp->nexthdr;
2348 return -ENOENT;
2350 hdrlen = 8;
2351 } else if (nexthdr == NEXTHDR_AUTH)
2352 hdrlen = (hp->hdrlen + 2) << 2;
2353 else
2354 hdrlen = ipv6_optlen(hp);
2356 nexthdr = hp->nexthdr;
2357 len -= hdrlen;
2358 start += hdrlen;
2361 *offset = start;
2362 return nexthdr;
2365 EXPORT_SYMBOL(ip6t_register_table);
2366 EXPORT_SYMBOL(ip6t_unregister_table);
2367 EXPORT_SYMBOL(ip6t_do_table);
2368 EXPORT_SYMBOL(ip6t_ext_hdr);
2369 EXPORT_SYMBOL(ipv6_find_hdr);
2371 module_init(ip6_tables_init);
2372 module_exit(ip6_tables_fini);