netfilter: xtables: do centralized checkentry call (1/2)
[linux-2.6/libata-dev.git] net/ipv6/netfilter/ip6_tables.c
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
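/*
 * Illustrative sketch, not part of the original file: the locking pattern the
 * comment above describes.  The softirq (packet) path only takes the read
 * lock while walking its per-CPU copy of the rules, whereas user context
 * takes the write lock to replace rules or snapshot counters, which briefly
 * stops traffic.  The lock name example_table_lock is hypothetical; the real
 * lock is table->lock in struct xt_table.
 */
static DEFINE_RWLOCK(example_table_lock);

static inline void example_softirq_path(void)
{
	read_lock_bh(&example_table_lock);
	/* ... walk this CPU's copy of the ruleset, bump its counters ... */
	read_unlock_bh(&example_table_lock);
}

static inline void example_user_context_path(void)
{
	write_lock_bh(&example_table_lock);
	/* ... swap in a new ruleset or take a consistent counter snapshot ... */
	write_unlock_bh(&example_table_lock);
}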
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
94 static inline bool
95 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev,
97 const char *outdev,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
102 size_t i;
103 unsigned long ret;
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
113 /*
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
120 return false;
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
134 return false;
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 return false;
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
154 int protohdr;
155 unsigned short _frag_off;
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
158 if (protohdr < 0) {
159 if (_frag_off == 0)
160 *hotdrop = true;
161 return false;
163 *fragoff = _frag_off;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
166 protohdr,
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
168 ip6info->proto);
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
172 return false;
174 return true;
177 /* We need a match for the '-p all' case, too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
180 return false;
182 return true;
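/*
 * Illustrative sketch, not part of the original file: the FWINV() idiom used
 * throughout ip6_packet_match() above.  A raw "does this field mismatch?"
 * result is XOR-ed with the rule's invert bit, so a '!' in the rule flips the
 * sense of the test.  The helper name fwinv_demo is hypothetical.
 */
static inline bool fwinv_demo(bool mismatch, u_int8_t invflags, u_int8_t flag)
{
	/* true means "this field disqualifies the packet" */
	return mismatch ^ !!(invflags & flag);
}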
185 /* should be ip6 safe */
186 static bool
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
192 return false;
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
197 return false;
199 return true;
202 static unsigned int
203 ip6t_error(struct sk_buff *skb,
204 const struct net_device *in,
205 const struct net_device *out,
206 unsigned int hooknum,
207 const struct xt_target *target,
208 const void *targinfo)
210 if (net_ratelimit())
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
213 return NF_DROP;
216 /* Performance critical - called for every packet */
217 static inline bool
218 do_match(struct ip6t_entry_match *m,
219 const struct sk_buff *skb,
220 const struct net_device *in,
221 const struct net_device *out,
222 int offset,
223 unsigned int protoff,
224 bool *hotdrop)
226 /* Stop iteration if it doesn't match */
227 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
228 offset, protoff, hotdrop))
229 return true;
230 else
231 return false;
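/*
 * Illustrative sketch, not part of the original file: the convention behind
 * do_match() returning true on failure.  IP6T_MATCH_ITERATE() stops walking
 * as soon as a callback returns non-zero, so "true" here means "stop, this
 * rule cannot match".  The helper name is hypothetical.
 */
static inline bool example_iterate_should_stop(int callback_ret)
{
	return callback_ret != 0;	/* non-zero terminates the iteration */
}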
234 static inline struct ip6t_entry *
235 get_entry(void *base, unsigned int offset)
237 return (struct ip6t_entry *)(base + offset);
240 /* All zeroes == unconditional rule. */
241 /* Mildly perf critical (only if packet tracing is on) */
242 static inline int
243 unconditional(const struct ip6t_ip6 *ipv6)
245 unsigned int i;
247 for (i = 0; i < sizeof(*ipv6); i++)
248 if (((char *)ipv6)[i])
249 break;
251 return (i == sizeof(*ipv6));
254 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
255 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
256 /* This cries for unification! */
257 static const char *const hooknames[] = {
258 [NF_INET_PRE_ROUTING] = "PREROUTING",
259 [NF_INET_LOCAL_IN] = "INPUT",
260 [NF_INET_FORWARD] = "FORWARD",
261 [NF_INET_LOCAL_OUT] = "OUTPUT",
262 [NF_INET_POST_ROUTING] = "POSTROUTING",
265 enum nf_ip_trace_comments {
266 NF_IP6_TRACE_COMMENT_RULE,
267 NF_IP6_TRACE_COMMENT_RETURN,
268 NF_IP6_TRACE_COMMENT_POLICY,
271 static const char *const comments[] = {
272 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
273 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
274 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
277 static struct nf_loginfo trace_loginfo = {
278 .type = NF_LOG_TYPE_LOG,
279 .u = {
280 .log = {
281 .level = 4,
282 .logflags = NF_LOG_MASK,
287 /* Mildly perf critical (only if packet tracing is on) */
288 static inline int
289 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
290 char *hookname, char **chainname,
291 char **comment, unsigned int *rulenum)
293 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
295 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
296 /* Head of user chain: ERROR target with chainname */
297 *chainname = t->target.data;
298 (*rulenum) = 0;
299 } else if (s == e) {
300 (*rulenum)++;
302 if (s->target_offset == sizeof(struct ip6t_entry)
303 && strcmp(t->target.u.kernel.target->name,
304 IP6T_STANDARD_TARGET) == 0
305 && t->verdict < 0
306 && unconditional(&s->ipv6)) {
307 /* Tail of chains: STANDARD target (return/policy) */
308 *comment = *chainname == hookname
309 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
310 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
312 return 1;
313 } else
314 (*rulenum)++;
316 return 0;
319 static void trace_packet(struct sk_buff *skb,
320 unsigned int hook,
321 const struct net_device *in,
322 const struct net_device *out,
323 const char *tablename,
324 struct xt_table_info *private,
325 struct ip6t_entry *e)
327 void *table_base;
328 const struct ip6t_entry *root;
329 char *hookname, *chainname, *comment;
330 unsigned int rulenum = 0;
332 table_base = (void *)private->entries[smp_processor_id()];
333 root = get_entry(table_base, private->hook_entry[hook]);
335 hookname = chainname = (char *)hooknames[hook];
336 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
338 IP6T_ENTRY_ITERATE(root,
339 private->size - private->hook_entry[hook],
340 get_chainname_rulenum,
341 e, hookname, &chainname, &comment, &rulenum);
343 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
344 "TRACE: %s:%s:%s:%u ",
345 tablename, chainname, comment, rulenum);
347 #endif
349 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
350 unsigned int
351 ip6t_do_table(struct sk_buff *skb,
352 unsigned int hook,
353 const struct net_device *in,
354 const struct net_device *out,
355 struct xt_table *table)
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 int offset = 0;
359 unsigned int protoff = 0;
360 bool hotdrop = false;
361 /* Initializing verdict to NF_DROP keeps gcc happy. */
362 unsigned int verdict = NF_DROP;
363 const char *indev, *outdev;
364 void *table_base;
365 struct ip6t_entry *e, *back;
366 struct xt_table_info *private;
368 /* Initialization */
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
376 * match it. */
378 read_lock_bh(&table->lock);
379 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
380 private = table->private;
381 table_base = (void *)private->entries[smp_processor_id()];
382 e = get_entry(table_base, private->hook_entry[hook]);
384 /* For return from builtin chain */
385 back = get_entry(table_base, private->underflow[hook]);
387 do {
388 IP_NF_ASSERT(e);
389 IP_NF_ASSERT(back);
390 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
391 &protoff, &offset, &hotdrop)) {
392 struct ip6t_entry_target *t;
394 if (IP6T_MATCH_ITERATE(e, do_match,
395 skb, in, out,
396 offset, protoff, &hotdrop) != 0)
397 goto no_match;
399 ADD_COUNTER(e->counters,
400 ntohs(ipv6_hdr(skb)->payload_len) +
401 sizeof(struct ipv6hdr), 1);
403 t = ip6t_get_target(e);
404 IP_NF_ASSERT(t->u.kernel.target);
406 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
407 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
408 /* The packet is traced: log it */
409 if (unlikely(skb->nf_trace))
410 trace_packet(skb, hook, in, out,
411 table->name, private, e);
412 #endif
413 /* Standard target? */
414 if (!t->u.kernel.target->target) {
415 int v;
417 v = ((struct ip6t_standard_target *)t)->verdict;
418 if (v < 0) {
419 /* Pop from stack? */
420 if (v != IP6T_RETURN) {
421 verdict = (unsigned)(-v) - 1;
422 break;
424 e = back;
425 back = get_entry(table_base,
426 back->comefrom);
427 continue;
429 if (table_base + v != (void *)e + e->next_offset
430 && !(e->ipv6.flags & IP6T_F_GOTO)) {
431 /* Save old back ptr in next entry */
432 struct ip6t_entry *next
433 = (void *)e + e->next_offset;
434 next->comefrom
435 = (void *)back - table_base;
436 /* set back pointer to next entry */
437 back = next;
440 e = get_entry(table_base, v);
441 } else {
442 /* Targets which reenter must return
443 abs. verdicts */
444 #ifdef CONFIG_NETFILTER_DEBUG
445 ((struct ip6t_entry *)table_base)->comefrom
446 = 0xeeeeeeec;
447 #endif
448 verdict = t->u.kernel.target->target(skb,
449 in, out,
450 hook,
451 t->u.kernel.target,
452 t->data);
454 #ifdef CONFIG_NETFILTER_DEBUG
455 if (((struct ip6t_entry *)table_base)->comefrom
456 != 0xeeeeeeec
457 && verdict == IP6T_CONTINUE) {
458 printk("Target %s reentered!\n",
459 t->u.kernel.target->name);
460 verdict = NF_DROP;
462 ((struct ip6t_entry *)table_base)->comefrom
463 = 0x57acc001;
464 #endif
465 if (verdict == IP6T_CONTINUE)
466 e = (void *)e + e->next_offset;
467 else
468 /* Verdict */
469 break;
471 } else {
473 no_match:
474 e = (void *)e + e->next_offset;
476 } while (!hotdrop);
478 #ifdef CONFIG_NETFILTER_DEBUG
479 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
480 #endif
481 read_unlock_bh(&table->lock);
483 #ifdef DEBUG_ALLOW_ALL
484 return NF_ACCEPT;
485 #else
486 if (hotdrop)
487 return NF_DROP;
488 else return verdict;
489 #endif
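/*
 * Illustrative sketch, not part of the original file: how standard-target
 * verdicts are encoded.  Userspace stores built-in verdicts as negative
 * values (-NF_ACCEPT - 1, -NF_DROP - 1, ...), which is why ip6t_do_table()
 * above recovers them with "(unsigned)(-v) - 1" when v is negative and not
 * IP6T_RETURN.  The helper name is hypothetical.
 */
static inline unsigned int example_decode_standard_verdict(int v)
{
	return (unsigned int)(-v) - 1;	/* -NF_DROP - 1 maps back to NF_DROP */
}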
492 /* Figures out from what hook each rule can be called: returns 0 if
493 there are loops. Puts hook bitmask in comefrom. */
494 static int
495 mark_source_chains(struct xt_table_info *newinfo,
496 unsigned int valid_hooks, void *entry0)
498 unsigned int hook;
500 /* No recursion; use packet counter to save back ptrs (reset
501 to 0 as we leave), and comefrom to save source hook bitmask */
502 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
503 unsigned int pos = newinfo->hook_entry[hook];
504 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
506 if (!(valid_hooks & (1 << hook)))
507 continue;
509 /* Set initial back pointer. */
510 e->counters.pcnt = pos;
512 for (;;) {
513 struct ip6t_standard_target *t
514 = (void *)ip6t_get_target(e);
515 int visited = e->comefrom & (1 << hook);
517 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
518 printk("iptables: loop hook %u pos %u %08X.\n",
519 hook, pos, e->comefrom);
520 return 0;
522 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
524 /* Unconditional return/END. */
525 if ((e->target_offset == sizeof(struct ip6t_entry)
526 && (strcmp(t->target.u.user.name,
527 IP6T_STANDARD_TARGET) == 0)
528 && t->verdict < 0
529 && unconditional(&e->ipv6)) || visited) {
530 unsigned int oldpos, size;
532 if (t->verdict < -NF_MAX_VERDICT - 1) {
533 duprintf("mark_source_chains: bad "
534 "negative verdict (%i)\n",
535 t->verdict);
536 return 0;
539 /* Return: backtrack through the last
540 big jump. */
541 do {
542 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
543 #ifdef DEBUG_IP_FIREWALL_USER
544 if (e->comefrom
545 & (1 << NF_INET_NUMHOOKS)) {
546 duprintf("Back unset "
547 "on hook %u "
548 "rule %u\n",
549 hook, pos);
551 #endif
552 oldpos = pos;
553 pos = e->counters.pcnt;
554 e->counters.pcnt = 0;
556 /* We're at the start. */
557 if (pos == oldpos)
558 goto next;
560 e = (struct ip6t_entry *)
561 (entry0 + pos);
562 } while (oldpos == pos + e->next_offset);
564 /* Move along one */
565 size = e->next_offset;
566 e = (struct ip6t_entry *)
567 (entry0 + pos + size);
568 e->counters.pcnt = pos;
569 pos += size;
570 } else {
571 int newpos = t->verdict;
573 if (strcmp(t->target.u.user.name,
574 IP6T_STANDARD_TARGET) == 0
575 && newpos >= 0) {
576 if (newpos > newinfo->size -
577 sizeof(struct ip6t_entry)) {
578 duprintf("mark_source_chains: "
579 "bad verdict (%i)\n",
580 newpos);
581 return 0;
583 /* This is a jump; chase it. */
584 duprintf("Jump rule %u -> %u\n",
585 pos, newpos);
586 } else {
587 /* ... this is a fallthru */
588 newpos = pos + e->next_offset;
590 e = (struct ip6t_entry *)
591 (entry0 + newpos);
592 e->counters.pcnt = pos;
593 pos = newpos;
596 next:
597 duprintf("Finished chain %u\n", hook);
599 return 1;
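/*
 * Illustrative sketch, not part of the original file: the comefrom
 * bookkeeping used by mark_source_chains() above.  Bit "hook" records that
 * an entry is reachable from that hook; the extra bit at position
 * NF_INET_NUMHOOKS marks "currently on the walk", so meeting it again means
 * the ruleset contains a loop.  The helper names are hypothetical.
 */
static inline void example_mark_on_walk(struct ip6t_entry *e, unsigned int hook)
{
	e->comefrom |= (1 << hook) | (1 << NF_INET_NUMHOOKS);
}

static inline bool example_loop_detected(const struct ip6t_entry *e)
{
	return e->comefrom & (1 << NF_INET_NUMHOOKS);
}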
602 static int
603 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
605 if (i && (*i)-- == 0)
606 return 1;
608 if (m->u.kernel.match->destroy)
609 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
610 module_put(m->u.kernel.match->me);
611 return 0;
614 static int
615 check_entry(struct ip6t_entry *e, const char *name)
617 struct ip6t_entry_target *t;
619 if (!ip6_checkentry(&e->ipv6)) {
620 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
621 return -EINVAL;
624 if (e->target_offset + sizeof(struct ip6t_entry_target) >
625 e->next_offset)
626 return -EINVAL;
628 t = ip6t_get_target(e);
629 if (e->target_offset + t->u.target_size > e->next_offset)
630 return -EINVAL;
632 return 0;
635 static int check_match(struct ip6t_entry_match *m, const char *name,
636 const struct ip6t_ip6 *ipv6,
637 unsigned int hookmask, unsigned int *i)
639 struct xt_match *match;
640 int ret;
642 match = m->u.kernel.match;
643 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
644 name, hookmask, ipv6->proto,
645 ipv6->invflags & IP6T_INV_PROTO, ipv6, m->data);
646 if (ret < 0) {
647 duprintf("ip_tables: check failed for `%s'.\n",
648 m->u.kernel.match->name);
649 return ret;
651 ++*i;
652 return 0;
655 static int
656 find_check_match(struct ip6t_entry_match *m,
657 const char *name,
658 const struct ip6t_ip6 *ipv6,
659 unsigned int hookmask,
660 unsigned int *i)
662 struct xt_match *match;
663 int ret;
665 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
666 m->u.user.revision),
667 "ip6t_%s", m->u.user.name);
668 if (IS_ERR(match) || !match) {
669 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
670 return match ? PTR_ERR(match) : -ENOENT;
672 m->u.kernel.match = match;
674 ret = check_match(m, name, ipv6, hookmask, i);
675 if (ret)
676 goto err;
678 return 0;
679 err:
680 module_put(m->u.kernel.match->me);
681 return ret;
684 static int check_target(struct ip6t_entry *e, const char *name)
686 struct ip6t_entry_target *t;
687 struct xt_target *target;
688 int ret;
690 t = ip6t_get_target(e);
691 target = t->u.kernel.target;
692 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
693 name, e->comefrom, e->ipv6.proto,
694 e->ipv6.invflags & IP6T_INV_PROTO, e, t->data);
695 if (ret < 0) {
696 duprintf("ip_tables: check failed for `%s'.\n",
697 t->u.kernel.target->name);
698 return ret;
700 return 0;
703 static int
704 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
705 unsigned int *i)
707 struct ip6t_entry_target *t;
708 struct xt_target *target;
709 int ret;
710 unsigned int j;
712 ret = check_entry(e, name);
713 if (ret)
714 return ret;
716 j = 0;
717 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
718 e->comefrom, &j);
719 if (ret != 0)
720 goto cleanup_matches;
722 t = ip6t_get_target(e);
723 target = try_then_request_module(xt_find_target(AF_INET6,
724 t->u.user.name,
725 t->u.user.revision),
726 "ip6t_%s", t->u.user.name);
727 if (IS_ERR(target) || !target) {
728 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
729 ret = target ? PTR_ERR(target) : -ENOENT;
730 goto cleanup_matches;
732 t->u.kernel.target = target;
734 ret = check_target(e, name);
735 if (ret)
736 goto err;
738 (*i)++;
739 return 0;
740 err:
741 module_put(t->u.kernel.target->me);
742 cleanup_matches:
743 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
744 return ret;
747 static int
748 check_entry_size_and_hooks(struct ip6t_entry *e,
749 struct xt_table_info *newinfo,
750 unsigned char *base,
751 unsigned char *limit,
752 const unsigned int *hook_entries,
753 const unsigned int *underflows,
754 unsigned int *i)
756 unsigned int h;
758 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
759 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
760 duprintf("Bad offset %p\n", e);
761 return -EINVAL;
764 if (e->next_offset
765 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
766 duprintf("checking: element %p size %u\n",
767 e, e->next_offset);
768 return -EINVAL;
771 /* Check hooks & underflows */
772 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
773 if ((unsigned char *)e - base == hook_entries[h])
774 newinfo->hook_entry[h] = hook_entries[h];
775 if ((unsigned char *)e - base == underflows[h])
776 newinfo->underflow[h] = underflows[h];
779 /* FIXME: underflows must be unconditional, standard verdicts
780 < 0 (not IP6T_RETURN). --RR */
782 /* Clear counters and comefrom */
783 e->counters = ((struct xt_counters) { 0, 0 });
784 e->comefrom = 0;
786 (*i)++;
787 return 0;
790 static int
791 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
793 struct ip6t_entry_target *t;
795 if (i && (*i)-- == 0)
796 return 1;
798 /* Cleanup all matches */
799 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
800 t = ip6t_get_target(e);
801 if (t->u.kernel.target->destroy)
802 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
803 module_put(t->u.kernel.target->me);
804 return 0;
807 /* Checks and translates the user-supplied table segment (held in
808 newinfo) */
809 static int
810 translate_table(const char *name,
811 unsigned int valid_hooks,
812 struct xt_table_info *newinfo,
813 void *entry0,
814 unsigned int size,
815 unsigned int number,
816 const unsigned int *hook_entries,
817 const unsigned int *underflows)
819 unsigned int i;
820 int ret;
822 newinfo->size = size;
823 newinfo->number = number;
825 /* Init all hooks to impossible value. */
826 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
827 newinfo->hook_entry[i] = 0xFFFFFFFF;
828 newinfo->underflow[i] = 0xFFFFFFFF;
831 duprintf("translate_table: size %u\n", newinfo->size);
832 i = 0;
833 /* Walk through entries, checking offsets. */
834 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
835 check_entry_size_and_hooks,
836 newinfo,
837 entry0,
838 entry0 + size,
839 hook_entries, underflows, &i);
840 if (ret != 0)
841 return ret;
843 if (i != number) {
844 duprintf("translate_table: %u not %u entries\n",
845 i, number);
846 return -EINVAL;
849 /* Check hooks all assigned */
850 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
851 /* Only hooks which are valid */
852 if (!(valid_hooks & (1 << i)))
853 continue;
854 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
855 duprintf("Invalid hook entry %u %u\n",
856 i, hook_entries[i]);
857 return -EINVAL;
859 if (newinfo->underflow[i] == 0xFFFFFFFF) {
860 duprintf("Invalid underflow %u %u\n",
861 i, underflows[i]);
862 return -EINVAL;
866 if (!mark_source_chains(newinfo, valid_hooks, entry0))
867 return -ELOOP;
869 /* Finally, each sanity check must pass */
870 i = 0;
871 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
872 find_check_entry, name, size, &i);
874 if (ret != 0) {
875 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
876 cleanup_entry, &i);
877 return ret;
880 /* And one copy for every other CPU */
881 for_each_possible_cpu(i) {
882 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
883 memcpy(newinfo->entries[i], entry0, newinfo->size);
886 return ret;
889 /* Gets counters. */
890 static inline int
891 add_entry_to_counter(const struct ip6t_entry *e,
892 struct xt_counters total[],
893 unsigned int *i)
895 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
897 (*i)++;
898 return 0;
901 static inline int
902 set_entry_to_counter(const struct ip6t_entry *e,
903 struct ip6t_counters total[],
904 unsigned int *i)
906 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
908 (*i)++;
909 return 0;
912 static void
913 get_counters(const struct xt_table_info *t,
914 struct xt_counters counters[])
916 unsigned int cpu;
917 unsigned int i;
918 unsigned int curcpu;
920 /* Instead of clearing (by a previous call to memset())
921 * the counters and using adds, we set the counters
922 * with data used by 'current' CPU
923 * We don't care about preemption here.
924 */
925 curcpu = raw_smp_processor_id();
927 i = 0;
928 IP6T_ENTRY_ITERATE(t->entries[curcpu],
929 t->size,
930 set_entry_to_counter,
931 counters,
932 &i);
934 for_each_possible_cpu(cpu) {
935 if (cpu == curcpu)
936 continue;
937 i = 0;
938 IP6T_ENTRY_ITERATE(t->entries[cpu],
939 t->size,
940 add_entry_to_counter,
941 counters,
942 &i);
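/*
 * Illustrative sketch, not part of the original file: what SET_COUNTER() and
 * ADD_COUNTER() as used by get_counters() above amount to for one
 * xt_counters slot — the current CPU seeds the totals, every other CPU is
 * accumulated on top.  The helper names are hypothetical.
 */
static inline void example_seed_counter(struct xt_counters *c, u64 bytes, u64 pkts)
{
	c->bcnt = bytes;
	c->pcnt = pkts;
}

static inline void example_accumulate_counter(struct xt_counters *c, u64 bytes, u64 pkts)
{
	c->bcnt += bytes;
	c->pcnt += pkts;
}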
946 static struct xt_counters *alloc_counters(struct xt_table *table)
948 unsigned int countersize;
949 struct xt_counters *counters;
950 const struct xt_table_info *private = table->private;
952 /* We need atomic snapshot of counters: rest doesn't change
953 (other than comefrom, which userspace doesn't care
954 about). */
955 countersize = sizeof(struct xt_counters) * private->number;
956 counters = vmalloc_node(countersize, numa_node_id());
958 if (counters == NULL)
959 return ERR_PTR(-ENOMEM);
961 /* First, sum counters... */
962 write_lock_bh(&table->lock);
963 get_counters(private, counters);
964 write_unlock_bh(&table->lock);
966 return counters;
969 static int
970 copy_entries_to_user(unsigned int total_size,
971 struct xt_table *table,
972 void __user *userptr)
974 unsigned int off, num;
975 struct ip6t_entry *e;
976 struct xt_counters *counters;
977 const struct xt_table_info *private = table->private;
978 int ret = 0;
979 const void *loc_cpu_entry;
981 counters = alloc_counters(table);
982 if (IS_ERR(counters))
983 return PTR_ERR(counters);
985 /* choose the copy that is on our node/cpu, ...
986 * This choice is lazy (because current thread is
987 * allowed to migrate to another cpu)
988 */
989 loc_cpu_entry = private->entries[raw_smp_processor_id()];
990 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
991 ret = -EFAULT;
992 goto free_counters;
995 /* FIXME: use iterator macros --RR */
996 /* ... then go back and fix counters and names */
997 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
998 unsigned int i;
999 const struct ip6t_entry_match *m;
1000 const struct ip6t_entry_target *t;
1002 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1003 if (copy_to_user(userptr + off
1004 + offsetof(struct ip6t_entry, counters),
1005 &counters[num],
1006 sizeof(counters[num])) != 0) {
1007 ret = -EFAULT;
1008 goto free_counters;
1011 for (i = sizeof(struct ip6t_entry);
1012 i < e->target_offset;
1013 i += m->u.match_size) {
1014 m = (void *)e + i;
1016 if (copy_to_user(userptr + off + i
1017 + offsetof(struct ip6t_entry_match,
1018 u.user.name),
1019 m->u.kernel.match->name,
1020 strlen(m->u.kernel.match->name)+1)
1021 != 0) {
1022 ret = -EFAULT;
1023 goto free_counters;
1027 t = ip6t_get_target(e);
1028 if (copy_to_user(userptr + off + e->target_offset
1029 + offsetof(struct ip6t_entry_target,
1030 u.user.name),
1031 t->u.kernel.target->name,
1032 strlen(t->u.kernel.target->name)+1) != 0) {
1033 ret = -EFAULT;
1034 goto free_counters;
1038 free_counters:
1039 vfree(counters);
1040 return ret;
1043 #ifdef CONFIG_COMPAT
1044 static void compat_standard_from_user(void *dst, void *src)
1046 int v = *(compat_int_t *)src;
1048 if (v > 0)
1049 v += xt_compat_calc_jump(AF_INET6, v);
1050 memcpy(dst, &v, sizeof(v));
1053 static int compat_standard_to_user(void __user *dst, void *src)
1055 compat_int_t cv = *(int *)src;
1057 if (cv > 0)
1058 cv -= xt_compat_calc_jump(AF_INET6, cv);
1059 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1062 static inline int
1063 compat_calc_match(struct ip6t_entry_match *m, int *size)
1065 *size += xt_compat_match_offset(m->u.kernel.match);
1066 return 0;
1069 static int compat_calc_entry(struct ip6t_entry *e,
1070 const struct xt_table_info *info,
1071 void *base, struct xt_table_info *newinfo)
1073 struct ip6t_entry_target *t;
1074 unsigned int entry_offset;
1075 int off, i, ret;
1077 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1078 entry_offset = (void *)e - base;
1079 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1080 t = ip6t_get_target(e);
1081 off += xt_compat_target_offset(t->u.kernel.target);
1082 newinfo->size -= off;
1083 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1084 if (ret)
1085 return ret;
1087 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1088 if (info->hook_entry[i] &&
1089 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1090 newinfo->hook_entry[i] -= off;
1091 if (info->underflow[i] &&
1092 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1093 newinfo->underflow[i] -= off;
1095 return 0;
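/*
 * Illustrative sketch, not part of the original file: the size delta that
 * compat_calc_entry() above accounts for.  A native ip6t_entry is larger
 * than the 32-bit compat_ip6t_entry, and each match/target may add its own
 * xt_compat_*_offset(); the sum is recorded per entry so that jump targets
 * and hook offsets can be translated.  The helper name is hypothetical.
 */
static inline int example_base_compat_delta(void)
{
	return sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
}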
1098 static int compat_table_info(const struct xt_table_info *info,
1099 struct xt_table_info *newinfo)
1101 void *loc_cpu_entry;
1103 if (!newinfo || !info)
1104 return -EINVAL;
1106 /* we don't care about newinfo->entries[] */
1107 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1108 newinfo->initial_entries = 0;
1109 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1110 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1111 compat_calc_entry, info, loc_cpu_entry,
1112 newinfo);
1114 #endif
1116 static int get_info(struct net *net, void __user *user, int *len, int compat)
1118 char name[IP6T_TABLE_MAXNAMELEN];
1119 struct xt_table *t;
1120 int ret;
1122 if (*len != sizeof(struct ip6t_getinfo)) {
1123 duprintf("length %u != %zu\n", *len,
1124 sizeof(struct ip6t_getinfo));
1125 return -EINVAL;
1128 if (copy_from_user(name, user, sizeof(name)) != 0)
1129 return -EFAULT;
1131 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1132 #ifdef CONFIG_COMPAT
1133 if (compat)
1134 xt_compat_lock(AF_INET6);
1135 #endif
1136 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1137 "ip6table_%s", name);
1138 if (t && !IS_ERR(t)) {
1139 struct ip6t_getinfo info;
1140 const struct xt_table_info *private = t->private;
1142 #ifdef CONFIG_COMPAT
1143 if (compat) {
1144 struct xt_table_info tmp;
1145 ret = compat_table_info(private, &tmp);
1146 xt_compat_flush_offsets(AF_INET6);
1147 private = &tmp;
1149 #endif
1150 info.valid_hooks = t->valid_hooks;
1151 memcpy(info.hook_entry, private->hook_entry,
1152 sizeof(info.hook_entry));
1153 memcpy(info.underflow, private->underflow,
1154 sizeof(info.underflow));
1155 info.num_entries = private->number;
1156 info.size = private->size;
1157 strcpy(info.name, name);
1159 if (copy_to_user(user, &info, *len) != 0)
1160 ret = -EFAULT;
1161 else
1162 ret = 0;
1164 xt_table_unlock(t);
1165 module_put(t->me);
1166 } else
1167 ret = t ? PTR_ERR(t) : -ENOENT;
1168 #ifdef CONFIG_COMPAT
1169 if (compat)
1170 xt_compat_unlock(AF_INET6);
1171 #endif
1172 return ret;
1175 static int
1176 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1178 int ret;
1179 struct ip6t_get_entries get;
1180 struct xt_table *t;
1182 if (*len < sizeof(get)) {
1183 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1184 return -EINVAL;
1186 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1187 return -EFAULT;
1188 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1189 duprintf("get_entries: %u != %zu\n",
1190 *len, sizeof(get) + get.size);
1191 return -EINVAL;
1194 t = xt_find_table_lock(net, AF_INET6, get.name);
1195 if (t && !IS_ERR(t)) {
1196 struct xt_table_info *private = t->private;
1197 duprintf("t->private->number = %u\n", private->number);
1198 if (get.size == private->size)
1199 ret = copy_entries_to_user(private->size,
1200 t, uptr->entrytable);
1201 else {
1202 duprintf("get_entries: I've got %u not %u!\n",
1203 private->size, get.size);
1204 ret = -EAGAIN;
1206 module_put(t->me);
1207 xt_table_unlock(t);
1208 } else
1209 ret = t ? PTR_ERR(t) : -ENOENT;
1211 return ret;
1214 static int
1215 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1216 struct xt_table_info *newinfo, unsigned int num_counters,
1217 void __user *counters_ptr)
1219 int ret;
1220 struct xt_table *t;
1221 struct xt_table_info *oldinfo;
1222 struct xt_counters *counters;
1223 const void *loc_cpu_old_entry;
1225 ret = 0;
1226 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1227 numa_node_id());
1228 if (!counters) {
1229 ret = -ENOMEM;
1230 goto out;
1233 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1234 "ip6table_%s", name);
1235 if (!t || IS_ERR(t)) {
1236 ret = t ? PTR_ERR(t) : -ENOENT;
1237 goto free_newinfo_counters_untrans;
1240 /* You lied! */
1241 if (valid_hooks != t->valid_hooks) {
1242 duprintf("Valid hook crap: %08X vs %08X\n",
1243 valid_hooks, t->valid_hooks);
1244 ret = -EINVAL;
1245 goto put_module;
1248 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1249 if (!oldinfo)
1250 goto put_module;
1252 /* Update module usage count based on number of rules */
1253 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1254 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1255 if ((oldinfo->number > oldinfo->initial_entries) ||
1256 (newinfo->number <= oldinfo->initial_entries))
1257 module_put(t->me);
1258 if ((oldinfo->number > oldinfo->initial_entries) &&
1259 (newinfo->number <= oldinfo->initial_entries))
1260 module_put(t->me);
1262 /* Get the old counters. */
1263 get_counters(oldinfo, counters);
1264 /* Decrease module usage counts and free resource */
1265 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1266 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1267 NULL);
1268 xt_free_table_info(oldinfo);
1269 if (copy_to_user(counters_ptr, counters,
1270 sizeof(struct xt_counters) * num_counters) != 0)
1271 ret = -EFAULT;
1272 vfree(counters);
1273 xt_table_unlock(t);
1274 return ret;
1276 put_module:
1277 module_put(t->me);
1278 xt_table_unlock(t);
1279 free_newinfo_counters_untrans:
1280 vfree(counters);
1281 out:
1282 return ret;
1285 static int
1286 do_replace(struct net *net, void __user *user, unsigned int len)
1288 int ret;
1289 struct ip6t_replace tmp;
1290 struct xt_table_info *newinfo;
1291 void *loc_cpu_entry;
1293 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1294 return -EFAULT;
1296 /* overflow check */
1297 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1298 return -ENOMEM;
1300 newinfo = xt_alloc_table_info(tmp.size);
1301 if (!newinfo)
1302 return -ENOMEM;
1304 /* choose the copy that is on our node/cpu */
1305 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1306 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1307 tmp.size) != 0) {
1308 ret = -EFAULT;
1309 goto free_newinfo;
1312 ret = translate_table(tmp.name, tmp.valid_hooks,
1313 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1314 tmp.hook_entry, tmp.underflow);
1315 if (ret != 0)
1316 goto free_newinfo;
1318 duprintf("ip_tables: Translated table\n");
1320 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1321 tmp.num_counters, tmp.counters);
1322 if (ret)
1323 goto free_newinfo_untrans;
1324 return 0;
1326 free_newinfo_untrans:
1327 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1328 free_newinfo:
1329 xt_free_table_info(newinfo);
1330 return ret;
1333 /* We're lazy, and add to the first CPU; overflow works its fey magic
1334 * and everything is OK. */
1335 static inline int
1336 add_counter_to_entry(struct ip6t_entry *e,
1337 const struct xt_counters addme[],
1338 unsigned int *i)
1340 #if 0
1341 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1343 (long unsigned int)e->counters.pcnt,
1344 (long unsigned int)e->counters.bcnt,
1345 (long unsigned int)addme[*i].pcnt,
1346 (long unsigned int)addme[*i].bcnt);
1347 #endif
1349 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1351 (*i)++;
1352 return 0;
1355 static int
1356 do_add_counters(struct net *net, void __user *user, unsigned int len,
1357 int compat)
1359 unsigned int i;
1360 struct xt_counters_info tmp;
1361 struct xt_counters *paddc;
1362 unsigned int num_counters;
1363 char *name;
1364 int size;
1365 void *ptmp;
1366 struct xt_table *t;
1367 const struct xt_table_info *private;
1368 int ret = 0;
1369 const void *loc_cpu_entry;
1370 #ifdef CONFIG_COMPAT
1371 struct compat_xt_counters_info compat_tmp;
1373 if (compat) {
1374 ptmp = &compat_tmp;
1375 size = sizeof(struct compat_xt_counters_info);
1376 } else
1377 #endif
1379 ptmp = &tmp;
1380 size = sizeof(struct xt_counters_info);
1383 if (copy_from_user(ptmp, user, size) != 0)
1384 return -EFAULT;
1386 #ifdef CONFIG_COMPAT
1387 if (compat) {
1388 num_counters = compat_tmp.num_counters;
1389 name = compat_tmp.name;
1390 } else
1391 #endif
1393 num_counters = tmp.num_counters;
1394 name = tmp.name;
1397 if (len != size + num_counters * sizeof(struct xt_counters))
1398 return -EINVAL;
1400 paddc = vmalloc_node(len - size, numa_node_id());
1401 if (!paddc)
1402 return -ENOMEM;
1404 if (copy_from_user(paddc, user + size, len - size) != 0) {
1405 ret = -EFAULT;
1406 goto free;
1409 t = xt_find_table_lock(net, AF_INET6, name);
1410 if (!t || IS_ERR(t)) {
1411 ret = t ? PTR_ERR(t) : -ENOENT;
1412 goto free;
1415 write_lock_bh(&t->lock);
1416 private = t->private;
1417 if (private->number != num_counters) {
1418 ret = -EINVAL;
1419 goto unlock_up_free;
1422 i = 0;
1423 /* Choose the copy that is on our node */
1424 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1425 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1426 private->size,
1427 add_counter_to_entry,
1428 paddc,
1429 &i);
1430 unlock_up_free:
1431 write_unlock_bh(&t->lock);
1432 xt_table_unlock(t);
1433 module_put(t->me);
1434 free:
1435 vfree(paddc);
1437 return ret;
1440 #ifdef CONFIG_COMPAT
1441 struct compat_ip6t_replace {
1442 char name[IP6T_TABLE_MAXNAMELEN];
1443 u32 valid_hooks;
1444 u32 num_entries;
1445 u32 size;
1446 u32 hook_entry[NF_INET_NUMHOOKS];
1447 u32 underflow[NF_INET_NUMHOOKS];
1448 u32 num_counters;
1449 compat_uptr_t counters; /* struct ip6t_counters * */
1450 struct compat_ip6t_entry entries[0];
1453 static int
1454 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1455 unsigned int *size, struct xt_counters *counters,
1456 unsigned int *i)
1458 struct ip6t_entry_target *t;
1459 struct compat_ip6t_entry __user *ce;
1460 u_int16_t target_offset, next_offset;
1461 compat_uint_t origsize;
1462 int ret;
1464 ret = -EFAULT;
1465 origsize = *size;
1466 ce = (struct compat_ip6t_entry __user *)*dstptr;
1467 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1468 goto out;
1470 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1471 goto out;
1473 *dstptr += sizeof(struct compat_ip6t_entry);
1474 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1476 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1477 target_offset = e->target_offset - (origsize - *size);
1478 if (ret)
1479 goto out;
1480 t = ip6t_get_target(e);
1481 ret = xt_compat_target_to_user(t, dstptr, size);
1482 if (ret)
1483 goto out;
1484 ret = -EFAULT;
1485 next_offset = e->next_offset - (origsize - *size);
1486 if (put_user(target_offset, &ce->target_offset))
1487 goto out;
1488 if (put_user(next_offset, &ce->next_offset))
1489 goto out;
1491 (*i)++;
1492 return 0;
1493 out:
1494 return ret;
1497 static int
1498 compat_find_calc_match(struct ip6t_entry_match *m,
1499 const char *name,
1500 const struct ip6t_ip6 *ipv6,
1501 unsigned int hookmask,
1502 int *size, unsigned int *i)
1504 struct xt_match *match;
1506 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1507 m->u.user.revision),
1508 "ip6t_%s", m->u.user.name);
1509 if (IS_ERR(match) || !match) {
1510 duprintf("compat_check_calc_match: `%s' not found\n",
1511 m->u.user.name);
1512 return match ? PTR_ERR(match) : -ENOENT;
1514 m->u.kernel.match = match;
1515 *size += xt_compat_match_offset(match);
1517 (*i)++;
1518 return 0;
1521 static int
1522 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1524 if (i && (*i)-- == 0)
1525 return 1;
1527 module_put(m->u.kernel.match->me);
1528 return 0;
1531 static int
1532 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1534 struct ip6t_entry_target *t;
1536 if (i && (*i)-- == 0)
1537 return 1;
1539 /* Cleanup all matches */
1540 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1541 t = compat_ip6t_get_target(e);
1542 module_put(t->u.kernel.target->me);
1543 return 0;
1546 static int
1547 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1548 struct xt_table_info *newinfo,
1549 unsigned int *size,
1550 unsigned char *base,
1551 unsigned char *limit,
1552 unsigned int *hook_entries,
1553 unsigned int *underflows,
1554 unsigned int *i,
1555 const char *name)
1557 struct ip6t_entry_target *t;
1558 struct xt_target *target;
1559 unsigned int entry_offset;
1560 unsigned int j;
1561 int ret, off, h;
1563 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1564 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1565 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1566 duprintf("Bad offset %p, limit = %p\n", e, limit);
1567 return -EINVAL;
1570 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1571 sizeof(struct compat_xt_entry_target)) {
1572 duprintf("checking: element %p size %u\n",
1573 e, e->next_offset);
1574 return -EINVAL;
1577 /* For purposes of check_entry casting the compat entry is fine */
1578 ret = check_entry((struct ip6t_entry *)e, name);
1579 if (ret)
1580 return ret;
1582 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1583 entry_offset = (void *)e - (void *)base;
1584 j = 0;
1585 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1586 &e->ipv6, e->comefrom, &off, &j);
1587 if (ret != 0)
1588 goto release_matches;
1590 t = compat_ip6t_get_target(e);
1591 target = try_then_request_module(xt_find_target(AF_INET6,
1592 t->u.user.name,
1593 t->u.user.revision),
1594 "ip6t_%s", t->u.user.name);
1595 if (IS_ERR(target) || !target) {
1596 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1597 t->u.user.name);
1598 ret = target ? PTR_ERR(target) : -ENOENT;
1599 goto release_matches;
1601 t->u.kernel.target = target;
1603 off += xt_compat_target_offset(target);
1604 *size += off;
1605 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1606 if (ret)
1607 goto out;
1609 /* Check hooks & underflows */
1610 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1611 if ((unsigned char *)e - base == hook_entries[h])
1612 newinfo->hook_entry[h] = hook_entries[h];
1613 if ((unsigned char *)e - base == underflows[h])
1614 newinfo->underflow[h] = underflows[h];
1617 /* Clear counters and comefrom */
1618 memset(&e->counters, 0, sizeof(e->counters));
1619 e->comefrom = 0;
1621 (*i)++;
1622 return 0;
1624 out:
1625 module_put(t->u.kernel.target->me);
1626 release_matches:
1627 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1628 return ret;
1631 static int
1632 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1633 unsigned int *size, const char *name,
1634 struct xt_table_info *newinfo, unsigned char *base)
1636 struct ip6t_entry_target *t;
1637 struct xt_target *target;
1638 struct ip6t_entry *de;
1639 unsigned int origsize;
1640 int ret, h;
1642 ret = 0;
1643 origsize = *size;
1644 de = (struct ip6t_entry *)*dstptr;
1645 memcpy(de, e, sizeof(struct ip6t_entry));
1646 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1648 *dstptr += sizeof(struct ip6t_entry);
1649 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1651 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1652 dstptr, size);
1653 if (ret)
1654 return ret;
1655 de->target_offset = e->target_offset - (origsize - *size);
1656 t = compat_ip6t_get_target(e);
1657 target = t->u.kernel.target;
1658 xt_compat_target_from_user(t, dstptr, size);
1660 de->next_offset = e->next_offset - (origsize - *size);
1661 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1662 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1663 newinfo->hook_entry[h] -= origsize - *size;
1664 if ((unsigned char *)de - base < newinfo->underflow[h])
1665 newinfo->underflow[h] -= origsize - *size;
1667 return ret;
1670 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1671 unsigned int *i)
1673 unsigned int j;
1674 int ret;
1676 j = 0;
1677 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
1678 e->comefrom, &j);
1679 if (ret)
1680 goto cleanup_matches;
1682 ret = check_target(e, name);
1683 if (ret)
1684 goto cleanup_matches;
1686 (*i)++;
1687 return 0;
1689 cleanup_matches:
1690 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
1691 return ret;
1694 static int
1695 translate_compat_table(const char *name,
1696 unsigned int valid_hooks,
1697 struct xt_table_info **pinfo,
1698 void **pentry0,
1699 unsigned int total_size,
1700 unsigned int number,
1701 unsigned int *hook_entries,
1702 unsigned int *underflows)
1704 unsigned int i, j;
1705 struct xt_table_info *newinfo, *info;
1706 void *pos, *entry0, *entry1;
1707 unsigned int size;
1708 int ret;
1710 info = *pinfo;
1711 entry0 = *pentry0;
1712 size = total_size;
1713 info->number = number;
1715 /* Init all hooks to impossible value. */
1716 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1717 info->hook_entry[i] = 0xFFFFFFFF;
1718 info->underflow[i] = 0xFFFFFFFF;
1721 duprintf("translate_compat_table: size %u\n", info->size);
1722 j = 0;
1723 xt_compat_lock(AF_INET6);
1724 /* Walk through entries, checking offsets. */
1725 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1726 check_compat_entry_size_and_hooks,
1727 info, &size, entry0,
1728 entry0 + total_size,
1729 hook_entries, underflows, &j, name);
1730 if (ret != 0)
1731 goto out_unlock;
1733 ret = -EINVAL;
1734 if (j != number) {
1735 duprintf("translate_compat_table: %u not %u entries\n",
1736 j, number);
1737 goto out_unlock;
1740 /* Check hooks all assigned */
1741 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1742 /* Only hooks which are valid */
1743 if (!(valid_hooks & (1 << i)))
1744 continue;
1745 if (info->hook_entry[i] == 0xFFFFFFFF) {
1746 duprintf("Invalid hook entry %u %u\n",
1747 i, hook_entries[i]);
1748 goto out_unlock;
1750 if (info->underflow[i] == 0xFFFFFFFF) {
1751 duprintf("Invalid underflow %u %u\n",
1752 i, underflows[i]);
1753 goto out_unlock;
1757 ret = -ENOMEM;
1758 newinfo = xt_alloc_table_info(size);
1759 if (!newinfo)
1760 goto out_unlock;
1762 newinfo->number = number;
1763 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1764 newinfo->hook_entry[i] = info->hook_entry[i];
1765 newinfo->underflow[i] = info->underflow[i];
1767 entry1 = newinfo->entries[raw_smp_processor_id()];
1768 pos = entry1;
1769 size = total_size;
1770 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1771 compat_copy_entry_from_user,
1772 &pos, &size, name, newinfo, entry1);
1773 xt_compat_flush_offsets(AF_INET6);
1774 xt_compat_unlock(AF_INET6);
1775 if (ret)
1776 goto free_newinfo;
1778 ret = -ELOOP;
1779 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1780 goto free_newinfo;
1782 i = 0;
1783 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1784 name, &i);
1785 if (ret) {
1786 j -= i;
1787 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1788 compat_release_entry, &j);
1789 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1790 xt_free_table_info(newinfo);
1791 return ret;
1794 /* And one copy for every other CPU */
1795 for_each_possible_cpu(i)
1796 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1797 memcpy(newinfo->entries[i], entry1, newinfo->size);
1799 *pinfo = newinfo;
1800 *pentry0 = entry1;
1801 xt_free_table_info(info);
1802 return 0;
1804 free_newinfo:
1805 xt_free_table_info(newinfo);
1806 out:
1807 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1808 return ret;
1809 out_unlock:
1810 xt_compat_flush_offsets(AF_INET6);
1811 xt_compat_unlock(AF_INET6);
1812 goto out;
1815 static int
1816 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1818 int ret;
1819 struct compat_ip6t_replace tmp;
1820 struct xt_table_info *newinfo;
1821 void *loc_cpu_entry;
1823 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1824 return -EFAULT;
1826 /* overflow check */
1827 if (tmp.size >= INT_MAX / num_possible_cpus())
1828 return -ENOMEM;
1829 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1830 return -ENOMEM;
1832 newinfo = xt_alloc_table_info(tmp.size);
1833 if (!newinfo)
1834 return -ENOMEM;
1836 /* choose the copy that is on our node/cpu */
1837 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1838 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1839 tmp.size) != 0) {
1840 ret = -EFAULT;
1841 goto free_newinfo;
1844 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1845 &newinfo, &loc_cpu_entry, tmp.size,
1846 tmp.num_entries, tmp.hook_entry,
1847 tmp.underflow);
1848 if (ret != 0)
1849 goto free_newinfo;
1851 duprintf("compat_do_replace: Translated table\n");
1853 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1854 tmp.num_counters, compat_ptr(tmp.counters));
1855 if (ret)
1856 goto free_newinfo_untrans;
1857 return 0;
1859 free_newinfo_untrans:
1860 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1861 free_newinfo:
1862 xt_free_table_info(newinfo);
1863 return ret;
1866 static int
1867 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1868 unsigned int len)
1870 int ret;
1872 if (!capable(CAP_NET_ADMIN))
1873 return -EPERM;
1875 switch (cmd) {
1876 case IP6T_SO_SET_REPLACE:
1877 ret = compat_do_replace(sock_net(sk), user, len);
1878 break;
1880 case IP6T_SO_SET_ADD_COUNTERS:
1881 ret = do_add_counters(sock_net(sk), user, len, 1);
1882 break;
1884 default:
1885 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1886 ret = -EINVAL;
1889 return ret;
1892 struct compat_ip6t_get_entries {
1893 char name[IP6T_TABLE_MAXNAMELEN];
1894 compat_uint_t size;
1895 struct compat_ip6t_entry entrytable[0];
1898 static int
1899 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1900 void __user *userptr)
1902 struct xt_counters *counters;
1903 const struct xt_table_info *private = table->private;
1904 void __user *pos;
1905 unsigned int size;
1906 int ret = 0;
1907 const void *loc_cpu_entry;
1908 unsigned int i = 0;
1910 counters = alloc_counters(table);
1911 if (IS_ERR(counters))
1912 return PTR_ERR(counters);
1914 /* choose the copy that is on our node/cpu, ...
1915 * This choice is lazy (because current thread is
1916 * allowed to migrate to another cpu)
1917 */
1918 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1919 pos = userptr;
1920 size = total_size;
1921 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1922 compat_copy_entry_to_user,
1923 &pos, &size, counters, &i);
1925 vfree(counters);
1926 return ret;
1929 static int
1930 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1931 int *len)
1933 int ret;
1934 struct compat_ip6t_get_entries get;
1935 struct xt_table *t;
1937 if (*len < sizeof(get)) {
1938 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1939 return -EINVAL;
1942 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1943 return -EFAULT;
1945 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1946 duprintf("compat_get_entries: %u != %zu\n",
1947 *len, sizeof(get) + get.size);
1948 return -EINVAL;
1951 xt_compat_lock(AF_INET6);
1952 t = xt_find_table_lock(net, AF_INET6, get.name);
1953 if (t && !IS_ERR(t)) {
1954 const struct xt_table_info *private = t->private;
1955 struct xt_table_info info;
1956 duprintf("t->private->number = %u\n", private->number);
1957 ret = compat_table_info(private, &info);
1958 if (!ret && get.size == info.size) {
1959 ret = compat_copy_entries_to_user(private->size,
1960 t, uptr->entrytable);
1961 } else if (!ret) {
1962 duprintf("compat_get_entries: I've got %u not %u!\n",
1963 private->size, get.size);
1964 ret = -EAGAIN;
1966 xt_compat_flush_offsets(AF_INET6);
1967 module_put(t->me);
1968 xt_table_unlock(t);
1969 } else
1970 ret = t ? PTR_ERR(t) : -ENOENT;
1972 xt_compat_unlock(AF_INET6);
1973 return ret;
1976 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1978 static int
1979 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1981 int ret;
1983 if (!capable(CAP_NET_ADMIN))
1984 return -EPERM;
1986 switch (cmd) {
1987 case IP6T_SO_GET_INFO:
1988 ret = get_info(sock_net(sk), user, len, 1);
1989 break;
1990 case IP6T_SO_GET_ENTRIES:
1991 ret = compat_get_entries(sock_net(sk), user, len);
1992 break;
1993 default:
1994 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1996 return ret;
1998 #endif
2000 static int
2001 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2003 int ret;
2005 if (!capable(CAP_NET_ADMIN))
2006 return -EPERM;
2008 switch (cmd) {
2009 case IP6T_SO_SET_REPLACE:
2010 ret = do_replace(sock_net(sk), user, len);
2011 break;
2013 case IP6T_SO_SET_ADD_COUNTERS:
2014 ret = do_add_counters(sock_net(sk), user, len, 0);
2015 break;
2017 default:
2018 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2019 ret = -EINVAL;
2022 return ret;
2025 static int
2026 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2028 int ret;
2030 if (!capable(CAP_NET_ADMIN))
2031 return -EPERM;
2033 switch (cmd) {
2034 case IP6T_SO_GET_INFO:
2035 ret = get_info(sock_net(sk), user, len, 0);
2036 break;
2038 case IP6T_SO_GET_ENTRIES:
2039 ret = get_entries(sock_net(sk), user, len);
2040 break;
2042 case IP6T_SO_GET_REVISION_MATCH:
2043 case IP6T_SO_GET_REVISION_TARGET: {
2044 struct ip6t_get_revision rev;
2045 int target;
2047 if (*len != sizeof(rev)) {
2048 ret = -EINVAL;
2049 break;
2051 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2052 ret = -EFAULT;
2053 break;
2056 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2057 target = 1;
2058 else
2059 target = 0;
2061 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2062 rev.revision,
2063 target, &ret),
2064 "ip6t_%s", rev.name);
2065 break;
2068 default:
2069 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2070 ret = -EINVAL;
2073 return ret;
2076 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2077 const struct ip6t_replace *repl)
2079 int ret;
2080 struct xt_table_info *newinfo;
2081 struct xt_table_info bootstrap
2082 = { 0, 0, 0, { 0 }, { 0 }, { } };
2083 void *loc_cpu_entry;
2084 struct xt_table *new_table;
2086 newinfo = xt_alloc_table_info(repl->size);
2087 if (!newinfo) {
2088 ret = -ENOMEM;
2089 goto out;
2092 /* choose the copy on our node/cpu, but don't care about preemption */
2093 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2094 memcpy(loc_cpu_entry, repl->entries, repl->size);
2096 ret = translate_table(table->name, table->valid_hooks,
2097 newinfo, loc_cpu_entry, repl->size,
2098 repl->num_entries,
2099 repl->hook_entry,
2100 repl->underflow);
2101 if (ret != 0)
2102 goto out_free;
2104 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2105 if (IS_ERR(new_table)) {
2106 ret = PTR_ERR(new_table);
2107 goto out_free;
2109 return new_table;
2111 out_free:
2112 xt_free_table_info(newinfo);
2113 out:
2114 return ERR_PTR(ret);
2117 void ip6t_unregister_table(struct xt_table *table)
2119 struct xt_table_info *private;
2120 void *loc_cpu_entry;
2121 struct module *table_owner = table->me;
2123 private = xt_unregister_table(table);
2125 /* Decrease module usage counts and free resources */
2126 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2127 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2128 if (private->number > private->initial_entries)
2129 module_put(table_owner);
2130 xt_free_table_info(private);
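/*
 * Illustrative sketch, not part of the original file: the pattern a table
 * module such as ip6table_filter follows when using the two functions above.
 * "example_table" and "example_repl" are hypothetical; a real caller builds
 * the ip6t_replace blob from its initial ruleset.
 */
static struct xt_table *example_registered_table;

static inline int example_table_init(struct net *net,
				     struct xt_table *example_table,
				     const struct ip6t_replace *example_repl)
{
	example_registered_table = ip6t_register_table(net, example_table,
						       example_repl);
	if (IS_ERR(example_registered_table))
		return PTR_ERR(example_registered_table);
	return 0;
}

static inline void example_table_exit(void)
{
	ip6t_unregister_table(example_registered_table);
}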
2133 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2134 static inline bool
2135 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2136 u_int8_t type, u_int8_t code,
2137 bool invert)
2139 return (type == test_type && code >= min_code && code <= max_code)
2140 ^ invert;
2143 static bool
2144 icmp6_match(const struct sk_buff *skb,
2145 const struct net_device *in,
2146 const struct net_device *out,
2147 const struct xt_match *match,
2148 const void *matchinfo,
2149 int offset,
2150 unsigned int protoff,
2151 bool *hotdrop)
2153 const struct icmp6hdr *ic;
2154 struct icmp6hdr _icmph;
2155 const struct ip6t_icmp *icmpinfo = matchinfo;
2157 /* Must not be a fragment. */
2158 if (offset)
2159 return false;
2161 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2162 if (ic == NULL) {
2163 /* We've been asked to examine this packet, and we
2164 * can't. Hence, no choice but to drop.
2165 */
2166 duprintf("Dropping evil ICMP tinygram.\n");
2167 *hotdrop = true;
2168 return false;
2171 return icmp6_type_code_match(icmpinfo->type,
2172 icmpinfo->code[0],
2173 icmpinfo->code[1],
2174 ic->icmp6_type, ic->icmp6_code,
2175 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2178 /* Called when user tries to insert an entry of this type. */
2179 static bool
2180 icmp6_checkentry(const char *tablename,
2181 const void *entry,
2182 const struct xt_match *match,
2183 void *matchinfo,
2184 unsigned int hook_mask)
2186 const struct ip6t_icmp *icmpinfo = matchinfo;
2188 /* Must specify no unknown invflags */
2189 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
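/*
 * Illustrative sketch, not part of the original file: how a rule such as
 * "-p icmpv6 --icmpv6-type 128" (ICMPv6 echo request, type 128, any code) is
 * evaluated through icmp6_type_code_match() above.  The helper name and the
 * concrete values are assumed for illustration only.
 */
static inline bool example_match_echo_request(u_int8_t type, u_int8_t code)
{
	return icmp6_type_code_match(128, 0, 0xFF, type, code, false);
}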
2192 /* The built-in targets: standard (NULL) and error. */
2193 static struct xt_target ip6t_standard_target __read_mostly = {
2194 .name = IP6T_STANDARD_TARGET,
2195 .targetsize = sizeof(int),
2196 .family = AF_INET6,
2197 #ifdef CONFIG_COMPAT
2198 .compatsize = sizeof(compat_int_t),
2199 .compat_from_user = compat_standard_from_user,
2200 .compat_to_user = compat_standard_to_user,
2201 #endif
2204 static struct xt_target ip6t_error_target __read_mostly = {
2205 .name = IP6T_ERROR_TARGET,
2206 .target = ip6t_error,
2207 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2208 .family = AF_INET6,
2211 static struct nf_sockopt_ops ip6t_sockopts = {
2212 .pf = PF_INET6,
2213 .set_optmin = IP6T_BASE_CTL,
2214 .set_optmax = IP6T_SO_SET_MAX+1,
2215 .set = do_ip6t_set_ctl,
2216 #ifdef CONFIG_COMPAT
2217 .compat_set = compat_do_ip6t_set_ctl,
2218 #endif
2219 .get_optmin = IP6T_BASE_CTL,
2220 .get_optmax = IP6T_SO_GET_MAX+1,
2221 .get = do_ip6t_get_ctl,
2222 #ifdef CONFIG_COMPAT
2223 .compat_get = compat_do_ip6t_get_ctl,
2224 #endif
2225 .owner = THIS_MODULE,
2228 static struct xt_match icmp6_matchstruct __read_mostly = {
2229 .name = "icmp6",
2230 .match = icmp6_match,
2231 .matchsize = sizeof(struct ip6t_icmp),
2232 .checkentry = icmp6_checkentry,
2233 .proto = IPPROTO_ICMPV6,
2234 .family = AF_INET6,
2237 static int __net_init ip6_tables_net_init(struct net *net)
2239 return xt_proto_init(net, AF_INET6);
2242 static void __net_exit ip6_tables_net_exit(struct net *net)
2244 xt_proto_fini(net, AF_INET6);
2247 static struct pernet_operations ip6_tables_net_ops = {
2248 .init = ip6_tables_net_init,
2249 .exit = ip6_tables_net_exit,
2252 static int __init ip6_tables_init(void)
2254 int ret;
2256 ret = register_pernet_subsys(&ip6_tables_net_ops);
2257 if (ret < 0)
2258 goto err1;
2260 /* No one else will be downing sem now, so we won't sleep */
2261 ret = xt_register_target(&ip6t_standard_target);
2262 if (ret < 0)
2263 goto err2;
2264 ret = xt_register_target(&ip6t_error_target);
2265 if (ret < 0)
2266 goto err3;
2267 ret = xt_register_match(&icmp6_matchstruct);
2268 if (ret < 0)
2269 goto err4;
2271 /* Register setsockopt */
2272 ret = nf_register_sockopt(&ip6t_sockopts);
2273 if (ret < 0)
2274 goto err5;
2276 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2277 return 0;
2279 err5:
2280 xt_unregister_match(&icmp6_matchstruct);
2281 err4:
2282 xt_unregister_target(&ip6t_error_target);
2283 err3:
2284 xt_unregister_target(&ip6t_standard_target);
2285 err2:
2286 unregister_pernet_subsys(&ip6_tables_net_ops);
2287 err1:
2288 return ret;
2291 static void __exit ip6_tables_fini(void)
2293 nf_unregister_sockopt(&ip6t_sockopts);
2295 xt_unregister_match(&icmp6_matchstruct);
2296 xt_unregister_target(&ip6t_error_target);
2297 xt_unregister_target(&ip6t_standard_target);
2299 unregister_pernet_subsys(&ip6_tables_net_ops);
2302 /*
2303 * find the offset to specified header or the protocol number of last header
2304 * if target < 0. "last header" is transport protocol header, ESP, or
2305 * "No next header".
2307 * If the target header is found, its offset is set in *offset and its protocol
2308 * number is returned. Otherwise, a negative errno (-ENOENT or -EBADMSG) is returned.
2310 * If the first fragment doesn't contain the final protocol header or
2311 * NEXTHDR_NONE it is considered invalid.
2313 * Note that a non-first fragment is a special case: "the protocol number
2314 * of last header" is the "next header" field in the Fragment header. In this
2315 * case, *offset is meaningless and the fragment offset is stored in *fragoff
2316 * if fragoff isn't NULL.
2317 *
2318 */
2319 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2320 int target, unsigned short *fragoff)
2322 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2323 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2324 unsigned int len = skb->len - start;
2326 if (fragoff)
2327 *fragoff = 0;
2329 while (nexthdr != target) {
2330 struct ipv6_opt_hdr _hdr, *hp;
2331 unsigned int hdrlen;
2333 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2334 if (target < 0)
2335 break;
2336 return -ENOENT;
2339 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2340 if (hp == NULL)
2341 return -EBADMSG;
2342 if (nexthdr == NEXTHDR_FRAGMENT) {
2343 unsigned short _frag_off;
2344 __be16 *fp;
2345 fp = skb_header_pointer(skb,
2346 start+offsetof(struct frag_hdr,
2347 frag_off),
2348 sizeof(_frag_off),
2349 &_frag_off);
2350 if (fp == NULL)
2351 return -EBADMSG;
2353 _frag_off = ntohs(*fp) & ~0x7;
2354 if (_frag_off) {
2355 if (target < 0 &&
2356 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2357 hp->nexthdr == NEXTHDR_NONE)) {
2358 if (fragoff)
2359 *fragoff = _frag_off;
2360 return hp->nexthdr;
2362 return -ENOENT;
2364 hdrlen = 8;
2365 } else if (nexthdr == NEXTHDR_AUTH)
2366 hdrlen = (hp->hdrlen + 2) << 2;
2367 else
2368 hdrlen = ipv6_optlen(hp);
2370 nexthdr = hp->nexthdr;
2371 len -= hdrlen;
2372 start += hdrlen;
2375 *offset = start;
2376 return nexthdr;
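/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * ipv6_find_hdr() locating a TCP header.  The wrapper name is hypothetical.
 */
static inline int example_find_tcp_header(const struct sk_buff *skb,
					  unsigned int *thoff)
{
	int protohdr = ipv6_find_hdr(skb, thoff, IPPROTO_TCP, NULL);

	if (protohdr < 0)
		return protohdr;	/* -ENOENT: no TCP header, -EBADMSG: malformed */
	return 0;			/* *thoff is now the offset of the TCP header */
}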
2379 EXPORT_SYMBOL(ip6t_register_table);
2380 EXPORT_SYMBOL(ip6t_unregister_table);
2381 EXPORT_SYMBOL(ip6t_do_table);
2382 EXPORT_SYMBOL(ip6t_ext_hdr);
2383 EXPORT_SYMBOL(ipv6_find_hdr);
2385 module_init(ip6_tables_init);
2386 module_exit(ip6_tables_fini);