net/ipv6/netfilter/ip6_tables.c
1 /*
2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
77 Hence the start of any table is given by get_table() below. */
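/* Concretely, each possible CPU gets its own copy of the rule blob in
 * xt_table_info->entries[cpu]: ip6t_do_table() below only ever walks the
 * copy belonging to the CPU it runs on, and the counter-reading paths
 * (get_counters()/alloc_counters()) sum the per-CPU copies back together
 * for userspace. */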
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
94 static inline bool
95 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev,
97 const char *outdev,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
102 unsigned long ret;
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
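/* FWINV(cond, flag) XORs the raw test result with the corresponding
 * IP6T_INV_* bit in ip6info->invflags. E.g. with IP6T_INV_SRCIP set, a
 * source-address mismatch (cond true) comes out false, so the rule
 * matches every source *except* the configured prefix. */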
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
112 /*
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
119 return false;
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
128 return false;
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
137 return false;
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
144 int protohdr;
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
148 if (protohdr < 0) {
149 if (_frag_off == 0)
150 *hotdrop = true;
151 return false;
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
156 protohdr,
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
158 ip6info->proto);
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
162 return false;
164 return true;
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
170 return false;
172 return true;
175 /* should be ip6 safe */
176 static bool
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
182 return false;
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
187 return false;
189 return true;
192 static unsigned int
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
195 if (net_ratelimit())
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
199 return NF_DROP;
202 /* Performance critical - called for every packet */
203 static inline bool
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
212 return true;
213 else
214 return false;
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
220 return (struct ip6t_entry *)(base + offset);
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline int
226 unconditional(const struct ip6t_ip6 *ipv6)
228 unsigned int i;
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
232 break;
234 return (i == sizeof(*ipv6));
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
262 .u = {
263 .log = {
264 .level = 4,
265 .logflags = NF_LOG_MASK,
270 /* Mildly perf critical (only if packet tracing is on) */
271 static inline int
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname,
274 char **comment, unsigned int *rulenum)
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
281 (*rulenum) = 0;
282 } else if (s == e) {
283 (*rulenum)++;
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
288 && t->verdict < 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
295 return 1;
296 } else
297 (*rulenum)++;
299 return 0;
302 static void trace_packet(struct sk_buff *skb,
303 unsigned int hook,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
310 void *table_base;
311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
315 table_base = (void *)private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
318 hookname = chainname = (char *)hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
330 #endif
332 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
333 unsigned int
334 ip6t_do_table(struct sk_buff *skb,
335 unsigned int hook,
336 const struct net_device *in,
337 const struct net_device *out,
338 struct xt_table *table)
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */
343 unsigned int verdict = NF_DROP;
344 const char *indev, *outdev;
345 void *table_base;
346 struct ip6t_entry *e, *back;
347 struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
351 /* Initialization */
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 * normally, except that they will NEVER match rules that ask
357 * things we don't know, ie. tcp syn flag or ports). If the
358 * rule is also a fragment-specific rule, non-fragments won't
359 * match it. */
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook;
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
368 rcu_read_lock_bh();
369 private = rcu_dereference(table->private);
370 table_base = rcu_dereference(private->entries[smp_processor_id()]);
372 e = get_entry(table_base, private->hook_entry[hook]);
374 /* For return from builtin chain */
375 back = get_entry(table_base, private->underflow[hook]);
377 do {
378 IP_NF_ASSERT(e);
379 IP_NF_ASSERT(back);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
382 struct ip6t_entry_target *t;
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
385 goto no_match;
387 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1);
391 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target);
394 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out,
399 table->name, private, e);
400 #endif
401 /* Standard target? */
402 if (!t->u.kernel.target->target) {
403 int v;
405 v = ((struct ip6t_standard_target *)t)->verdict;
406 if (v < 0) {
407 /* Pop from stack? */
408 if (v != IP6T_RETURN) {
409 verdict = (unsigned)(-v) - 1;
410 break;
412 e = back;
413 back = get_entry(table_base,
414 back->comefrom);
415 continue;
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
422 next->comefrom
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
425 back = next;
428 e = get_entry(table_base, v);
429 } else {
430 /* Targets which reenter must return
431 abs. verdicts */
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
435 #ifdef CONFIG_NETFILTER_DEBUG
436 ((struct ip6t_entry *)table_base)->comefrom
437 = 0xeeeeeeec;
438 #endif
439 verdict = t->u.kernel.target->target(skb,
440 &tgpar);
442 #ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom
444 != 0xeeeeeeec
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
448 verdict = NF_DROP;
450 ((struct ip6t_entry *)table_base)->comefrom
451 = 0x57acc001;
452 #endif
453 if (verdict == IP6T_CONTINUE)
454 e = (void *)e + e->next_offset;
455 else
456 /* Verdict */
457 break;
459 } else {
461 no_match:
462 e = (void *)e + e->next_offset;
464 } while (!hotdrop);
466 #ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
468 #endif
469 rcu_read_unlock_bh();
471 #ifdef DEBUG_ALLOW_ALL
472 return NF_ACCEPT;
473 #else
474 if (hotdrop)
475 return NF_DROP;
476 else return verdict;
477 #endif
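/* Usage sketch (not part of this file; names are hypothetical): a table
 * module such as ip6table_filter forwards its netfilter hook invocations
 * straight to ip6t_do_table(), passing the struct xt_table it obtained
 * earlier from ip6t_register_table(). */
#if 0
static unsigned int
example_ip6table_hook(unsigned int hook, struct sk_buff *skb,
		      const struct net_device *in,
		      const struct net_device *out,
		      int (*okfn)(struct sk_buff *))
{
	return ip6t_do_table(skb, hook, in, out, example_ip6table);
}
#endif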
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
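/* Two tricks keep this iterative: e->counters.pcnt temporarily holds a
 * back pointer to the jump origin while a chain is being walked (and is
 * reset to 0 on the way back), and bit NF_INET_NUMHOOKS of e->comefrom
 * marks entries on the current walk so that revisiting one is reported
 * as a loop. */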
482 static int
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
486 unsigned int hook;
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
494 if (!(valid_hooks & (1 << hook)))
495 continue;
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
500 for (;;) {
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 printk("ip6_tables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
508 return 0;
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
516 && t->verdict < 0
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
523 duprintf("mark_source_chains: bad "
524 "negative verdict (%i)\n",
525 t->verdict);
526 return 0;
529 /* Return: backtrack through the last
530 big jump. */
531 do {
532 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
533 #ifdef DEBUG_IP_FIREWALL_USER
534 if (e->comefrom
535 & (1 << NF_INET_NUMHOOKS)) {
536 duprintf("Back unset "
537 "on hook %u "
538 "rule %u\n",
539 hook, pos);
541 #endif
542 oldpos = pos;
543 pos = e->counters.pcnt;
544 e->counters.pcnt = 0;
546 /* We're at the start. */
547 if (pos == oldpos)
548 goto next;
550 e = (struct ip6t_entry *)
551 (entry0 + pos);
552 } while (oldpos == pos + e->next_offset);
554 /* Move along one */
555 size = e->next_offset;
556 e = (struct ip6t_entry *)
557 (entry0 + pos + size);
558 e->counters.pcnt = pos;
559 pos += size;
560 } else {
561 int newpos = t->verdict;
563 if (strcmp(t->target.u.user.name,
564 IP6T_STANDARD_TARGET) == 0
565 && newpos >= 0) {
566 if (newpos > newinfo->size -
567 sizeof(struct ip6t_entry)) {
568 duprintf("mark_source_chains: "
569 "bad verdict (%i)\n",
570 newpos);
571 return 0;
573 /* This a jump; chase it. */
574 duprintf("Jump rule %u -> %u\n",
575 pos, newpos);
576 } else {
577 /* ... this is a fallthru */
578 newpos = pos + e->next_offset;
580 e = (struct ip6t_entry *)
581 (entry0 + newpos);
582 e->counters.pcnt = pos;
583 pos = newpos;
586 next:
587 duprintf("Finished chain %u\n", hook);
589 return 1;
592 static int
593 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
595 struct xt_mtdtor_param par;
597 if (i && (*i)-- == 0)
598 return 1;
600 par.match = m->u.kernel.match;
601 par.matchinfo = m->data;
602 par.family = NFPROTO_IPV6;
603 if (par.match->destroy != NULL)
604 par.match->destroy(&par);
605 module_put(par.match->me);
606 return 0;
609 static int
610 check_entry(struct ip6t_entry *e, const char *name)
612 struct ip6t_entry_target *t;
614 if (!ip6_checkentry(&e->ipv6)) {
615 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
616 return -EINVAL;
619 if (e->target_offset + sizeof(struct ip6t_entry_target) >
620 e->next_offset)
621 return -EINVAL;
623 t = ip6t_get_target(e);
624 if (e->target_offset + t->u.target_size > e->next_offset)
625 return -EINVAL;
627 return 0;
630 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
631 unsigned int *i)
633 const struct ip6t_ip6 *ipv6 = par->entryinfo;
634 int ret;
636 par->match = m->u.kernel.match;
637 par->matchinfo = m->data;
639 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
640 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
641 if (ret < 0) {
642 duprintf("ip_tables: check failed for `%s'.\n",
643 par->match->name);
644 return ret;
646 ++*i;
647 return 0;
650 static int
651 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
652 unsigned int *i)
654 struct xt_match *match;
655 int ret;
657 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
658 m->u.user.revision),
659 "ip6t_%s", m->u.user.name);
660 if (IS_ERR(match) || !match) {
661 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
662 return match ? PTR_ERR(match) : -ENOENT;
664 m->u.kernel.match = match;
666 ret = check_match(m, par, i);
667 if (ret)
668 goto err;
670 return 0;
671 err:
672 module_put(m->u.kernel.match->me);
673 return ret;
676 static int check_target(struct ip6t_entry *e, const char *name)
678 struct ip6t_entry_target *t = ip6t_get_target(e);
679 struct xt_tgchk_param par = {
680 .table = name,
681 .entryinfo = e,
682 .target = t->u.kernel.target,
683 .targinfo = t->data,
684 .hook_mask = e->comefrom,
685 .family = NFPROTO_IPV6,
687 int ret;
689 t = ip6t_get_target(e);
690 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
691 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
692 if (ret < 0) {
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
695 return ret;
697 return 0;
700 static int
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
702 unsigned int *i)
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
706 int ret;
707 unsigned int j;
708 struct xt_mtchk_param mtpar;
710 ret = check_entry(e, name);
711 if (ret)
712 return ret;
714 j = 0;
715 mtpar.table = name;
716 mtpar.entryinfo = &e->ipv6;
717 mtpar.hook_mask = e->comefrom;
718 mtpar.family = NFPROTO_IPV6;
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
720 if (ret != 0)
721 goto cleanup_matches;
723 t = ip6t_get_target(e);
724 target = try_then_request_module(xt_find_target(AF_INET6,
725 t->u.user.name,
726 t->u.user.revision),
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
733 t->u.kernel.target = target;
735 ret = check_target(e, name);
736 if (ret)
737 goto err;
739 (*i)++;
740 return 0;
741 err:
742 module_put(t->u.kernel.target->me);
743 cleanup_matches:
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
745 return ret;
748 static int
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
751 unsigned char *base,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
755 unsigned int *i)
757 unsigned int h;
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
762 return -EINVAL;
765 if (e->next_offset
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
768 e, e->next_offset);
769 return -EINVAL;
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
785 e->comefrom = 0;
787 (*i)++;
788 return 0;
791 static int
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
794 struct xt_tgdtor_param par;
795 struct ip6t_entry_target *t;
797 if (i && (*i)-- == 0)
798 return 1;
800 /* Cleanup all matches */
801 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
802 t = ip6t_get_target(e);
804 par.target = t->u.kernel.target;
805 par.targinfo = t->data;
806 par.family = NFPROTO_IPV6;
807 if (par.target->destroy != NULL)
808 par.target->destroy(&par);
809 module_put(par.target->me);
810 return 0;
813 /* Checks and translates the user-supplied table segment (held in
814 newinfo) */
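/* The translation below makes four passes over the rule blob copied from
 * userspace:
 *  1) check_entry_size_and_hooks() validates size/alignment of every entry
 *     and records the hook entry points and underflows,
 *  2) mark_source_chains() walks each chain from every valid hook to detect
 *     loops and to fill in each entry's comefrom hook mask,
 *  3) find_check_entry() looks up and verifies every match and target
 *     extension,
 *  4) the verified blob is copied into every other CPU's entries[] slot. */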
815 static int
816 translate_table(const char *name,
817 unsigned int valid_hooks,
818 struct xt_table_info *newinfo,
819 void *entry0,
820 unsigned int size,
821 unsigned int number,
822 const unsigned int *hook_entries,
823 const unsigned int *underflows)
825 unsigned int i;
826 int ret;
828 newinfo->size = size;
829 newinfo->number = number;
831 /* Init all hooks to impossible value. */
832 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
833 newinfo->hook_entry[i] = 0xFFFFFFFF;
834 newinfo->underflow[i] = 0xFFFFFFFF;
837 duprintf("translate_table: size %u\n", newinfo->size);
838 i = 0;
839 /* Walk through entries, checking offsets. */
840 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
841 check_entry_size_and_hooks,
842 newinfo,
843 entry0,
844 entry0 + size,
845 hook_entries, underflows, &i);
846 if (ret != 0)
847 return ret;
849 if (i != number) {
850 duprintf("translate_table: %u not %u entries\n",
851 i, number);
852 return -EINVAL;
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(valid_hooks & (1 << i)))
859 continue;
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
862 i, hook_entries[i]);
863 return -EINVAL;
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
867 i, underflows[i]);
868 return -EINVAL;
872 if (!mark_source_chains(newinfo, valid_hooks, entry0))
873 return -ELOOP;
875 /* Finally, each sanity check must pass */
876 i = 0;
877 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
878 find_check_entry, name, size, &i);
880 if (ret != 0) {
881 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
882 cleanup_entry, &i);
883 return ret;
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
892 return ret;
895 /* Gets counters. */
896 static inline int
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
899 unsigned int *i)
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
903 (*i)++;
904 return 0;
907 static inline int
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
910 unsigned int *i)
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
914 (*i)++;
915 return 0;
918 static void
919 get_counters(const struct xt_table_info *t,
920 struct xt_counters counters[])
922 unsigned int cpu;
923 unsigned int i;
924 unsigned int curcpu;
926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters
928 * with data used by 'current' CPU
929 * We don't care about preemption here.
930 */
931 curcpu = raw_smp_processor_id();
933 i = 0;
934 IP6T_ENTRY_ITERATE(t->entries[curcpu],
935 t->size,
936 set_entry_to_counter,
937 counters,
938 &i);
940 for_each_possible_cpu(cpu) {
941 if (cpu == curcpu)
942 continue;
943 i = 0;
944 IP6T_ENTRY_ITERATE(t->entries[cpu],
945 t->size,
946 add_entry_to_counter,
947 counters,
948 &i);
952 /* We're lazy, and add to the first CPU; overflow works its fey magic
953 * and everything is OK. */
954 static int
955 add_counter_to_entry(struct ip6t_entry *e,
956 const struct xt_counters addme[],
957 unsigned int *i)
959 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
961 (*i)++;
962 return 0;
965 /* Take values from counters and add them back onto the current cpu */
966 static void put_counters(struct xt_table_info *t,
967 const struct xt_counters counters[])
969 unsigned int i, cpu;
971 local_bh_disable();
972 cpu = smp_processor_id();
973 i = 0;
974 IP6T_ENTRY_ITERATE(t->entries[cpu],
975 t->size,
976 add_counter_to_entry,
977 counters,
978 &i);
979 local_bh_enable();
982 static inline int
983 zero_entry_counter(struct ip6t_entry *e, void *arg)
985 e->counters.bcnt = 0;
986 e->counters.pcnt = 0;
987 return 0;
990 static void
991 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
993 unsigned int cpu;
994 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
996 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
997 for_each_possible_cpu(cpu) {
998 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
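/* Reading counters needs a stable snapshot while packets keep flowing.
 * alloc_counters() below clones the live entries with zeroed counters,
 * swaps the clone in under RCU (xt_table_entry_swap_rcu), waits for
 * in-flight softirq readers with synchronize_net(), harvests the totals
 * from the now-quiescent old copy, and finally folds those totals back
 * into the live table with put_counters() so nothing is lost. */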
1004 static struct xt_counters *alloc_counters(struct xt_table *table)
1006 unsigned int countersize;
1007 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1011 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care
1013 about). */
1014 countersize = sizeof(struct xt_counters) * private->number;
1015 counters = vmalloc_node(countersize, numa_node_id());
1017 if (counters == NULL)
1018 goto nomem;
1020 info = xt_alloc_table_info(private->size);
1021 if (!info)
1022 goto free_counters;
1024 clone_counters(info, private);
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1034 xt_free_table_info(info);
1036 free_counters:
1037 vfree(counters);
1038 nomem:
1039 return ERR_PTR(-ENOMEM);
1042 static int
1043 copy_entries_to_user(unsigned int total_size,
1044 struct xt_table *table,
1045 void __user *userptr)
1047 unsigned int off, num;
1048 struct ip6t_entry *e;
1049 struct xt_counters *counters;
1050 const struct xt_table_info *private = table->private;
1051 int ret = 0;
1052 const void *loc_cpu_entry;
1054 counters = alloc_counters(table);
1055 if (IS_ERR(counters))
1056 return PTR_ERR(counters);
1058 /* choose the copy that is on our node/cpu, ...
1059 * This choice is lazy (because current thread is
1060 * allowed to migrate to another cpu)
1061 */
1062 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1063 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1064 ret = -EFAULT;
1065 goto free_counters;
1068 /* FIXME: use iterator macros --RR */
1069 /* ... then go back and fix counters and names */
1070 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1071 unsigned int i;
1072 const struct ip6t_entry_match *m;
1073 const struct ip6t_entry_target *t;
1075 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1076 if (copy_to_user(userptr + off
1077 + offsetof(struct ip6t_entry, counters),
1078 &counters[num],
1079 sizeof(counters[num])) != 0) {
1080 ret = -EFAULT;
1081 goto free_counters;
1084 for (i = sizeof(struct ip6t_entry);
1085 i < e->target_offset;
1086 i += m->u.match_size) {
1087 m = (void *)e + i;
1089 if (copy_to_user(userptr + off + i
1090 + offsetof(struct ip6t_entry_match,
1091 u.user.name),
1092 m->u.kernel.match->name,
1093 strlen(m->u.kernel.match->name)+1)
1094 != 0) {
1095 ret = -EFAULT;
1096 goto free_counters;
1100 t = ip6t_get_target(e);
1101 if (copy_to_user(userptr + off + e->target_offset
1102 + offsetof(struct ip6t_entry_target,
1103 u.user.name),
1104 t->u.kernel.target->name,
1105 strlen(t->u.kernel.target->name)+1) != 0) {
1106 ret = -EFAULT;
1107 goto free_counters;
1111 free_counters:
1112 vfree(counters);
1113 return ret;
1116 #ifdef CONFIG_COMPAT
1117 static void compat_standard_from_user(void *dst, void *src)
1119 int v = *(compat_int_t *)src;
1121 if (v > 0)
1122 v += xt_compat_calc_jump(AF_INET6, v);
1123 memcpy(dst, &v, sizeof(v));
1126 static int compat_standard_to_user(void __user *dst, void *src)
1128 compat_int_t cv = *(int *)src;
1130 if (cv > 0)
1131 cv -= xt_compat_calc_jump(AF_INET6, cv);
1132 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1135 static inline int
1136 compat_calc_match(struct ip6t_entry_match *m, int *size)
1138 *size += xt_compat_match_offset(m->u.kernel.match);
1139 return 0;
1142 static int compat_calc_entry(struct ip6t_entry *e,
1143 const struct xt_table_info *info,
1144 void *base, struct xt_table_info *newinfo)
1146 struct ip6t_entry_target *t;
1147 unsigned int entry_offset;
1148 int off, i, ret;
1150 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1151 entry_offset = (void *)e - base;
1152 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1153 t = ip6t_get_target(e);
1154 off += xt_compat_target_offset(t->u.kernel.target);
1155 newinfo->size -= off;
1156 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1157 if (ret)
1158 return ret;
1160 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1161 if (info->hook_entry[i] &&
1162 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1163 newinfo->hook_entry[i] -= off;
1164 if (info->underflow[i] &&
1165 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1166 newinfo->underflow[i] -= off;
1168 return 0;
1171 static int compat_table_info(const struct xt_table_info *info,
1172 struct xt_table_info *newinfo)
1174 void *loc_cpu_entry;
1176 if (!newinfo || !info)
1177 return -EINVAL;
1179 /* we don't care about newinfo->entries[] */
1180 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1181 newinfo->initial_entries = 0;
1182 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1183 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1184 compat_calc_entry, info, loc_cpu_entry,
1185 newinfo);
1187 #endif
1189 static int get_info(struct net *net, void __user *user, int *len, int compat)
1191 char name[IP6T_TABLE_MAXNAMELEN];
1192 struct xt_table *t;
1193 int ret;
1195 if (*len != sizeof(struct ip6t_getinfo)) {
1196 duprintf("length %u != %zu\n", *len,
1197 sizeof(struct ip6t_getinfo));
1198 return -EINVAL;
1201 if (copy_from_user(name, user, sizeof(name)) != 0)
1202 return -EFAULT;
1204 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1205 #ifdef CONFIG_COMPAT
1206 if (compat)
1207 xt_compat_lock(AF_INET6);
1208 #endif
1209 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1210 "ip6table_%s", name);
1211 if (t && !IS_ERR(t)) {
1212 struct ip6t_getinfo info;
1213 const struct xt_table_info *private = t->private;
1215 #ifdef CONFIG_COMPAT
1216 if (compat) {
1217 struct xt_table_info tmp;
1218 ret = compat_table_info(private, &tmp);
1219 xt_compat_flush_offsets(AF_INET6);
1220 private = &tmp;
1222 #endif
1223 info.valid_hooks = t->valid_hooks;
1224 memcpy(info.hook_entry, private->hook_entry,
1225 sizeof(info.hook_entry));
1226 memcpy(info.underflow, private->underflow,
1227 sizeof(info.underflow));
1228 info.num_entries = private->number;
1229 info.size = private->size;
1230 strcpy(info.name, name);
1232 if (copy_to_user(user, &info, *len) != 0)
1233 ret = -EFAULT;
1234 else
1235 ret = 0;
1237 xt_table_unlock(t);
1238 module_put(t->me);
1239 } else
1240 ret = t ? PTR_ERR(t) : -ENOENT;
1241 #ifdef CONFIG_COMPAT
1242 if (compat)
1243 xt_compat_unlock(AF_INET6);
1244 #endif
1245 return ret;
1248 static int
1249 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1251 int ret;
1252 struct ip6t_get_entries get;
1253 struct xt_table *t;
1255 if (*len < sizeof(get)) {
1256 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1257 return -EINVAL;
1259 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1260 return -EFAULT;
1261 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1262 duprintf("get_entries: %u != %zu\n",
1263 *len, sizeof(get) + get.size);
1264 return -EINVAL;
1267 t = xt_find_table_lock(net, AF_INET6, get.name);
1268 if (t && !IS_ERR(t)) {
1269 struct xt_table_info *private = t->private;
1270 duprintf("t->private->number = %u\n", private->number);
1271 if (get.size == private->size)
1272 ret = copy_entries_to_user(private->size,
1273 t, uptr->entrytable);
1274 else {
1275 duprintf("get_entries: I've got %u not %u!\n",
1276 private->size, get.size);
1277 ret = -EAGAIN;
1279 module_put(t->me);
1280 xt_table_unlock(t);
1281 } else
1282 ret = t ? PTR_ERR(t) : -ENOENT;
1284 return ret;
1287 static int
1288 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1289 struct xt_table_info *newinfo, unsigned int num_counters,
1290 void __user *counters_ptr)
1292 int ret;
1293 struct xt_table *t;
1294 struct xt_table_info *oldinfo;
1295 struct xt_counters *counters;
1296 const void *loc_cpu_old_entry;
1298 ret = 0;
1299 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1300 numa_node_id());
1301 if (!counters) {
1302 ret = -ENOMEM;
1303 goto out;
1306 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1307 "ip6table_%s", name);
1308 if (!t || IS_ERR(t)) {
1309 ret = t ? PTR_ERR(t) : -ENOENT;
1310 goto free_newinfo_counters_untrans;
1313 /* You lied! */
1314 if (valid_hooks != t->valid_hooks) {
1315 duprintf("Valid hook crap: %08X vs %08X\n",
1316 valid_hooks, t->valid_hooks);
1317 ret = -EINVAL;
1318 goto put_module;
1321 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1322 if (!oldinfo)
1323 goto put_module;
1325 /* Update module usage count based on number of rules */
1326 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1327 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1328 if ((oldinfo->number > oldinfo->initial_entries) ||
1329 (newinfo->number <= oldinfo->initial_entries))
1330 module_put(t->me);
1331 if ((oldinfo->number > oldinfo->initial_entries) &&
1332 (newinfo->number <= oldinfo->initial_entries))
1333 module_put(t->me);
1335 /* Get the old counters. */
1336 get_counters(oldinfo, counters);
1337 /* Decrease module usage counts and free resource */
1338 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1339 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1340 NULL);
1341 xt_free_table_info(oldinfo);
1342 if (copy_to_user(counters_ptr, counters,
1343 sizeof(struct xt_counters) * num_counters) != 0)
1344 ret = -EFAULT;
1345 vfree(counters);
1346 xt_table_unlock(t);
1347 return ret;
1349 put_module:
1350 module_put(t->me);
1351 xt_table_unlock(t);
1352 free_newinfo_counters_untrans:
1353 vfree(counters);
1354 out:
1355 return ret;
1358 static int
1359 do_replace(struct net *net, void __user *user, unsigned int len)
1361 int ret;
1362 struct ip6t_replace tmp;
1363 struct xt_table_info *newinfo;
1364 void *loc_cpu_entry;
1366 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1367 return -EFAULT;
1369 /* overflow check */
1370 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1371 return -ENOMEM;
1373 newinfo = xt_alloc_table_info(tmp.size);
1374 if (!newinfo)
1375 return -ENOMEM;
1377 /* choose the copy that is on our node/cpu */
1378 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1379 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1380 tmp.size) != 0) {
1381 ret = -EFAULT;
1382 goto free_newinfo;
1385 ret = translate_table(tmp.name, tmp.valid_hooks,
1386 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1387 tmp.hook_entry, tmp.underflow);
1388 if (ret != 0)
1389 goto free_newinfo;
1391 duprintf("ip_tables: Translated table\n");
1393 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1394 tmp.num_counters, tmp.counters);
1395 if (ret)
1396 goto free_newinfo_untrans;
1397 return 0;
1399 free_newinfo_untrans:
1400 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1401 free_newinfo:
1402 xt_free_table_info(newinfo);
1403 return ret;
1406 static int
1407 do_add_counters(struct net *net, void __user *user, unsigned int len,
1408 int compat)
1410 unsigned int i;
1411 struct xt_counters_info tmp;
1412 struct xt_counters *paddc;
1413 unsigned int num_counters;
1414 char *name;
1415 int size;
1416 void *ptmp;
1417 struct xt_table *t;
1418 const struct xt_table_info *private;
1419 int ret = 0;
1420 const void *loc_cpu_entry;
1421 #ifdef CONFIG_COMPAT
1422 struct compat_xt_counters_info compat_tmp;
1424 if (compat) {
1425 ptmp = &compat_tmp;
1426 size = sizeof(struct compat_xt_counters_info);
1427 } else
1428 #endif
1430 ptmp = &tmp;
1431 size = sizeof(struct xt_counters_info);
1434 if (copy_from_user(ptmp, user, size) != 0)
1435 return -EFAULT;
1437 #ifdef CONFIG_COMPAT
1438 if (compat) {
1439 num_counters = compat_tmp.num_counters;
1440 name = compat_tmp.name;
1441 } else
1442 #endif
1444 num_counters = tmp.num_counters;
1445 name = tmp.name;
1448 if (len != size + num_counters * sizeof(struct xt_counters))
1449 return -EINVAL;
1451 paddc = vmalloc_node(len - size, numa_node_id());
1452 if (!paddc)
1453 return -ENOMEM;
1455 if (copy_from_user(paddc, user + size, len - size) != 0) {
1456 ret = -EFAULT;
1457 goto free;
1460 t = xt_find_table_lock(net, AF_INET6, name);
1461 if (!t || IS_ERR(t)) {
1462 ret = t ? PTR_ERR(t) : -ENOENT;
1463 goto free;
1466 mutex_lock(&t->lock);
1467 private = t->private;
1468 if (private->number != num_counters) {
1469 ret = -EINVAL;
1470 goto unlock_up_free;
1473 preempt_disable();
1474 i = 0;
1475 /* Choose the copy that is on our node */
1476 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1477 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1478 private->size,
1479 add_counter_to_entry,
1480 paddc,
1481 &i);
1482 preempt_enable();
1483 unlock_up_free:
1484 mutex_unlock(&t->lock);
1485 xt_table_unlock(t);
1486 module_put(t->me);
1487 free:
1488 vfree(paddc);
1490 return ret;
1493 #ifdef CONFIG_COMPAT
1494 struct compat_ip6t_replace {
1495 char name[IP6T_TABLE_MAXNAMELEN];
1496 u32 valid_hooks;
1497 u32 num_entries;
1498 u32 size;
1499 u32 hook_entry[NF_INET_NUMHOOKS];
1500 u32 underflow[NF_INET_NUMHOOKS];
1501 u32 num_counters;
1502 compat_uptr_t counters; /* struct ip6t_counters * */
1503 struct compat_ip6t_entry entries[0];
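/* The compat path exists for 32-bit userspace talking to a 64-bit kernel:
 * struct compat_ip6t_entry is smaller than struct ip6t_entry, so every
 * entry is converted between the two layouts and all offsets
 * (target_offset, next_offset, hook_entry[], underflow[]) are adjusted by
 * the accumulated size difference; see compat_calc_entry() above and the
 * compat_copy_entry_to_user()/_from_user() helpers below. */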
1506 static int
1507 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1508 unsigned int *size, struct xt_counters *counters,
1509 unsigned int *i)
1511 struct ip6t_entry_target *t;
1512 struct compat_ip6t_entry __user *ce;
1513 u_int16_t target_offset, next_offset;
1514 compat_uint_t origsize;
1515 int ret;
1517 ret = -EFAULT;
1518 origsize = *size;
1519 ce = (struct compat_ip6t_entry __user *)*dstptr;
1520 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1521 goto out;
1523 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1524 goto out;
1526 *dstptr += sizeof(struct compat_ip6t_entry);
1527 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1529 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1530 target_offset = e->target_offset - (origsize - *size);
1531 if (ret)
1532 goto out;
1533 t = ip6t_get_target(e);
1534 ret = xt_compat_target_to_user(t, dstptr, size);
1535 if (ret)
1536 goto out;
1537 ret = -EFAULT;
1538 next_offset = e->next_offset - (origsize - *size);
1539 if (put_user(target_offset, &ce->target_offset))
1540 goto out;
1541 if (put_user(next_offset, &ce->next_offset))
1542 goto out;
1544 (*i)++;
1545 return 0;
1546 out:
1547 return ret;
1550 static int
1551 compat_find_calc_match(struct ip6t_entry_match *m,
1552 const char *name,
1553 const struct ip6t_ip6 *ipv6,
1554 unsigned int hookmask,
1555 int *size, unsigned int *i)
1557 struct xt_match *match;
1559 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1560 m->u.user.revision),
1561 "ip6t_%s", m->u.user.name);
1562 if (IS_ERR(match) || !match) {
1563 duprintf("compat_check_calc_match: `%s' not found\n",
1564 m->u.user.name);
1565 return match ? PTR_ERR(match) : -ENOENT;
1567 m->u.kernel.match = match;
1568 *size += xt_compat_match_offset(match);
1570 (*i)++;
1571 return 0;
1574 static int
1575 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1577 if (i && (*i)-- == 0)
1578 return 1;
1580 module_put(m->u.kernel.match->me);
1581 return 0;
1584 static int
1585 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1587 struct ip6t_entry_target *t;
1589 if (i && (*i)-- == 0)
1590 return 1;
1592 /* Cleanup all matches */
1593 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1594 t = compat_ip6t_get_target(e);
1595 module_put(t->u.kernel.target->me);
1596 return 0;
1599 static int
1600 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1601 struct xt_table_info *newinfo,
1602 unsigned int *size,
1603 unsigned char *base,
1604 unsigned char *limit,
1605 unsigned int *hook_entries,
1606 unsigned int *underflows,
1607 unsigned int *i,
1608 const char *name)
1610 struct ip6t_entry_target *t;
1611 struct xt_target *target;
1612 unsigned int entry_offset;
1613 unsigned int j;
1614 int ret, off, h;
1616 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1617 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1618 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1619 duprintf("Bad offset %p, limit = %p\n", e, limit);
1620 return -EINVAL;
1623 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1624 sizeof(struct compat_xt_entry_target)) {
1625 duprintf("checking: element %p size %u\n",
1626 e, e->next_offset);
1627 return -EINVAL;
1630 /* For purposes of check_entry casting the compat entry is fine */
1631 ret = check_entry((struct ip6t_entry *)e, name);
1632 if (ret)
1633 return ret;
1635 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1636 entry_offset = (void *)e - (void *)base;
1637 j = 0;
1638 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1639 &e->ipv6, e->comefrom, &off, &j);
1640 if (ret != 0)
1641 goto release_matches;
1643 t = compat_ip6t_get_target(e);
1644 target = try_then_request_module(xt_find_target(AF_INET6,
1645 t->u.user.name,
1646 t->u.user.revision),
1647 "ip6t_%s", t->u.user.name);
1648 if (IS_ERR(target) || !target) {
1649 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1650 t->u.user.name);
1651 ret = target ? PTR_ERR(target) : -ENOENT;
1652 goto release_matches;
1654 t->u.kernel.target = target;
1656 off += xt_compat_target_offset(target);
1657 *size += off;
1658 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1659 if (ret)
1660 goto out;
1662 /* Check hooks & underflows */
1663 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1664 if ((unsigned char *)e - base == hook_entries[h])
1665 newinfo->hook_entry[h] = hook_entries[h];
1666 if ((unsigned char *)e - base == underflows[h])
1667 newinfo->underflow[h] = underflows[h];
1670 /* Clear counters and comefrom */
1671 memset(&e->counters, 0, sizeof(e->counters));
1672 e->comefrom = 0;
1674 (*i)++;
1675 return 0;
1677 out:
1678 module_put(t->u.kernel.target->me);
1679 release_matches:
1680 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1681 return ret;
1684 static int
1685 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1686 unsigned int *size, const char *name,
1687 struct xt_table_info *newinfo, unsigned char *base)
1689 struct ip6t_entry_target *t;
1690 struct xt_target *target;
1691 struct ip6t_entry *de;
1692 unsigned int origsize;
1693 int ret, h;
1695 ret = 0;
1696 origsize = *size;
1697 de = (struct ip6t_entry *)*dstptr;
1698 memcpy(de, e, sizeof(struct ip6t_entry));
1699 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1701 *dstptr += sizeof(struct ip6t_entry);
1702 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1704 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1705 dstptr, size);
1706 if (ret)
1707 return ret;
1708 de->target_offset = e->target_offset - (origsize - *size);
1709 t = compat_ip6t_get_target(e);
1710 target = t->u.kernel.target;
1711 xt_compat_target_from_user(t, dstptr, size);
1713 de->next_offset = e->next_offset - (origsize - *size);
1714 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1715 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1716 newinfo->hook_entry[h] -= origsize - *size;
1717 if ((unsigned char *)de - base < newinfo->underflow[h])
1718 newinfo->underflow[h] -= origsize - *size;
1720 return ret;
1723 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1724 unsigned int *i)
1726 unsigned int j;
1727 int ret;
1728 struct xt_mtchk_param mtpar;
1730 j = 0;
1731 mtpar.table = name;
1732 mtpar.entryinfo = &e->ipv6;
1733 mtpar.hook_mask = e->comefrom;
1734 mtpar.family = NFPROTO_IPV6;
1735 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1736 if (ret)
1737 goto cleanup_matches;
1739 ret = check_target(e, name);
1740 if (ret)
1741 goto cleanup_matches;
1743 (*i)++;
1744 return 0;
1746 cleanup_matches:
1747 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
1748 return ret;
1751 static int
1752 translate_compat_table(const char *name,
1753 unsigned int valid_hooks,
1754 struct xt_table_info **pinfo,
1755 void **pentry0,
1756 unsigned int total_size,
1757 unsigned int number,
1758 unsigned int *hook_entries,
1759 unsigned int *underflows)
1761 unsigned int i, j;
1762 struct xt_table_info *newinfo, *info;
1763 void *pos, *entry0, *entry1;
1764 unsigned int size;
1765 int ret;
1767 info = *pinfo;
1768 entry0 = *pentry0;
1769 size = total_size;
1770 info->number = number;
1772 /* Init all hooks to impossible value. */
1773 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1774 info->hook_entry[i] = 0xFFFFFFFF;
1775 info->underflow[i] = 0xFFFFFFFF;
1778 duprintf("translate_compat_table: size %u\n", info->size);
1779 j = 0;
1780 xt_compat_lock(AF_INET6);
1781 /* Walk through entries, checking offsets. */
1782 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1783 check_compat_entry_size_and_hooks,
1784 info, &size, entry0,
1785 entry0 + total_size,
1786 hook_entries, underflows, &j, name);
1787 if (ret != 0)
1788 goto out_unlock;
1790 ret = -EINVAL;
1791 if (j != number) {
1792 duprintf("translate_compat_table: %u not %u entries\n",
1793 j, number);
1794 goto out_unlock;
1797 /* Check hooks all assigned */
1798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1799 /* Only hooks which are valid */
1800 if (!(valid_hooks & (1 << i)))
1801 continue;
1802 if (info->hook_entry[i] == 0xFFFFFFFF) {
1803 duprintf("Invalid hook entry %u %u\n",
1804 i, hook_entries[i]);
1805 goto out_unlock;
1807 if (info->underflow[i] == 0xFFFFFFFF) {
1808 duprintf("Invalid underflow %u %u\n",
1809 i, underflows[i]);
1810 goto out_unlock;
1814 ret = -ENOMEM;
1815 newinfo = xt_alloc_table_info(size);
1816 if (!newinfo)
1817 goto out_unlock;
1819 newinfo->number = number;
1820 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1821 newinfo->hook_entry[i] = info->hook_entry[i];
1822 newinfo->underflow[i] = info->underflow[i];
1824 entry1 = newinfo->entries[raw_smp_processor_id()];
1825 pos = entry1;
1826 size = total_size;
1827 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1828 compat_copy_entry_from_user,
1829 &pos, &size, name, newinfo, entry1);
1830 xt_compat_flush_offsets(AF_INET6);
1831 xt_compat_unlock(AF_INET6);
1832 if (ret)
1833 goto free_newinfo;
1835 ret = -ELOOP;
1836 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1837 goto free_newinfo;
1839 i = 0;
1840 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1841 name, &i);
1842 if (ret) {
1843 j -= i;
1844 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1845 compat_release_entry, &j);
1846 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1847 xt_free_table_info(newinfo);
1848 return ret;
1851 /* And one copy for every other CPU */
1852 for_each_possible_cpu(i)
1853 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1854 memcpy(newinfo->entries[i], entry1, newinfo->size);
1856 *pinfo = newinfo;
1857 *pentry0 = entry1;
1858 xt_free_table_info(info);
1859 return 0;
1861 free_newinfo:
1862 xt_free_table_info(newinfo);
1863 out:
1864 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1865 return ret;
1866 out_unlock:
1867 xt_compat_flush_offsets(AF_INET6);
1868 xt_compat_unlock(AF_INET6);
1869 goto out;
1872 static int
1873 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1875 int ret;
1876 struct compat_ip6t_replace tmp;
1877 struct xt_table_info *newinfo;
1878 void *loc_cpu_entry;
1880 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1881 return -EFAULT;
1883 /* overflow check */
1884 if (tmp.size >= INT_MAX / num_possible_cpus())
1885 return -ENOMEM;
1886 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1887 return -ENOMEM;
1889 newinfo = xt_alloc_table_info(tmp.size);
1890 if (!newinfo)
1891 return -ENOMEM;
1893 /* choose the copy that is on our node/cpu */
1894 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1895 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1896 tmp.size) != 0) {
1897 ret = -EFAULT;
1898 goto free_newinfo;
1901 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1902 &newinfo, &loc_cpu_entry, tmp.size,
1903 tmp.num_entries, tmp.hook_entry,
1904 tmp.underflow);
1905 if (ret != 0)
1906 goto free_newinfo;
1908 duprintf("compat_do_replace: Translated table\n");
1910 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1911 tmp.num_counters, compat_ptr(tmp.counters));
1912 if (ret)
1913 goto free_newinfo_untrans;
1914 return 0;
1916 free_newinfo_untrans:
1917 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1918 free_newinfo:
1919 xt_free_table_info(newinfo);
1920 return ret;
1923 static int
1924 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1925 unsigned int len)
1927 int ret;
1929 if (!capable(CAP_NET_ADMIN))
1930 return -EPERM;
1932 switch (cmd) {
1933 case IP6T_SO_SET_REPLACE:
1934 ret = compat_do_replace(sock_net(sk), user, len);
1935 break;
1937 case IP6T_SO_SET_ADD_COUNTERS:
1938 ret = do_add_counters(sock_net(sk), user, len, 1);
1939 break;
1941 default:
1942 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1943 ret = -EINVAL;
1946 return ret;
1949 struct compat_ip6t_get_entries {
1950 char name[IP6T_TABLE_MAXNAMELEN];
1951 compat_uint_t size;
1952 struct compat_ip6t_entry entrytable[0];
1955 static int
1956 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1957 void __user *userptr)
1959 struct xt_counters *counters;
1960 const struct xt_table_info *private = table->private;
1961 void __user *pos;
1962 unsigned int size;
1963 int ret = 0;
1964 const void *loc_cpu_entry;
1965 unsigned int i = 0;
1967 counters = alloc_counters(table);
1968 if (IS_ERR(counters))
1969 return PTR_ERR(counters);
1971 /* choose the copy that is on our node/cpu, ...
1972 * This choice is lazy (because current thread is
1973 * allowed to migrate to another cpu)
1974 */
1975 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1976 pos = userptr;
1977 size = total_size;
1978 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1979 compat_copy_entry_to_user,
1980 &pos, &size, counters, &i);
1982 vfree(counters);
1983 return ret;
1986 static int
1987 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1988 int *len)
1990 int ret;
1991 struct compat_ip6t_get_entries get;
1992 struct xt_table *t;
1994 if (*len < sizeof(get)) {
1995 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1996 return -EINVAL;
1999 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2000 return -EFAULT;
2002 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2003 duprintf("compat_get_entries: %u != %zu\n",
2004 *len, sizeof(get) + get.size);
2005 return -EINVAL;
2008 xt_compat_lock(AF_INET6);
2009 t = xt_find_table_lock(net, AF_INET6, get.name);
2010 if (t && !IS_ERR(t)) {
2011 const struct xt_table_info *private = t->private;
2012 struct xt_table_info info;
2013 duprintf("t->private->number = %u\n", private->number);
2014 ret = compat_table_info(private, &info);
2015 if (!ret && get.size == info.size) {
2016 ret = compat_copy_entries_to_user(private->size,
2017 t, uptr->entrytable);
2018 } else if (!ret) {
2019 duprintf("compat_get_entries: I've got %u not %u!\n",
2020 private->size, get.size);
2021 ret = -EAGAIN;
2023 xt_compat_flush_offsets(AF_INET6);
2024 module_put(t->me);
2025 xt_table_unlock(t);
2026 } else
2027 ret = t ? PTR_ERR(t) : -ENOENT;
2029 xt_compat_unlock(AF_INET6);
2030 return ret;
2033 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2035 static int
2036 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2038 int ret;
2040 if (!capable(CAP_NET_ADMIN))
2041 return -EPERM;
2043 switch (cmd) {
2044 case IP6T_SO_GET_INFO:
2045 ret = get_info(sock_net(sk), user, len, 1);
2046 break;
2047 case IP6T_SO_GET_ENTRIES:
2048 ret = compat_get_entries(sock_net(sk), user, len);
2049 break;
2050 default:
2051 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2053 return ret;
2055 #endif
2057 static int
2058 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2060 int ret;
2062 if (!capable(CAP_NET_ADMIN))
2063 return -EPERM;
2065 switch (cmd) {
2066 case IP6T_SO_SET_REPLACE:
2067 ret = do_replace(sock_net(sk), user, len);
2068 break;
2070 case IP6T_SO_SET_ADD_COUNTERS:
2071 ret = do_add_counters(sock_net(sk), user, len, 0);
2072 break;
2074 default:
2075 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2076 ret = -EINVAL;
2079 return ret;
2082 static int
2083 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2085 int ret;
2087 if (!capable(CAP_NET_ADMIN))
2088 return -EPERM;
2090 switch (cmd) {
2091 case IP6T_SO_GET_INFO:
2092 ret = get_info(sock_net(sk), user, len, 0);
2093 break;
2095 case IP6T_SO_GET_ENTRIES:
2096 ret = get_entries(sock_net(sk), user, len);
2097 break;
2099 case IP6T_SO_GET_REVISION_MATCH:
2100 case IP6T_SO_GET_REVISION_TARGET: {
2101 struct ip6t_get_revision rev;
2102 int target;
2104 if (*len != sizeof(rev)) {
2105 ret = -EINVAL;
2106 break;
2108 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2109 ret = -EFAULT;
2110 break;
2113 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2114 target = 1;
2115 else
2116 target = 0;
2118 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2119 rev.revision,
2120 target, &ret),
2121 "ip6t_%s", rev.name);
2122 break;
2125 default:
2126 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2127 ret = -EINVAL;
2130 return ret;
2133 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2134 const struct ip6t_replace *repl)
2136 int ret;
2137 struct xt_table_info *newinfo;
2138 struct xt_table_info bootstrap
2139 = { 0, 0, 0, { 0 }, { 0 }, { } };
2140 void *loc_cpu_entry;
2141 struct xt_table *new_table;
2143 newinfo = xt_alloc_table_info(repl->size);
2144 if (!newinfo) {
2145 ret = -ENOMEM;
2146 goto out;
2149 /* choose the copy on our node/cpu, but dont care about preemption */
2150 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2151 memcpy(loc_cpu_entry, repl->entries, repl->size);
2153 ret = translate_table(table->name, table->valid_hooks,
2154 newinfo, loc_cpu_entry, repl->size,
2155 repl->num_entries,
2156 repl->hook_entry,
2157 repl->underflow);
2158 if (ret != 0)
2159 goto out_free;
2161 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2162 if (IS_ERR(new_table)) {
2163 ret = PTR_ERR(new_table);
2164 goto out_free;
2166 return new_table;
2168 out_free:
2169 xt_free_table_info(newinfo);
2170 out:
2171 return ERR_PTR(ret);
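/* Registration sketch (illustrative only; every name below is
 * hypothetical): a table module describes itself with a struct xt_table
 * plus a struct ip6t_replace template holding its boot-time rules, and
 * hands both to ip6t_register_table(), typically from a pernet init hook. */
#if 0
static struct xt_table example_table = {
	.name		= "example",
	.valid_hooks	= 1 << NF_INET_LOCAL_IN,
	.me		= THIS_MODULE,
	.af		= AF_INET6,
};

/* later, e.g. in example_net_init(): */
struct xt_table *t = ip6t_register_table(net, &example_table,
					 &example_initial_replace);
#endif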
2174 void ip6t_unregister_table(struct xt_table *table)
2176 struct xt_table_info *private;
2177 void *loc_cpu_entry;
2178 struct module *table_owner = table->me;
2180 private = xt_unregister_table(table);
2182 /* Decrease module usage counts and free resources */
2183 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2184 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2185 if (private->number > private->initial_entries)
2186 module_put(table_owner);
2187 xt_free_table_info(private);
2190 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2191 static inline bool
2192 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2193 u_int8_t type, u_int8_t code,
2194 bool invert)
2196 return (type == test_type && code >= min_code && code <= max_code)
2197 ^ invert;
2200 static bool
2201 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2203 const struct icmp6hdr *ic;
2204 struct icmp6hdr _icmph;
2205 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2207 /* Must not be a fragment. */
2208 if (par->fragoff != 0)
2209 return false;
2211 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2212 if (ic == NULL) {
2213 /* We've been asked to examine this packet, and we
2214 * can't. Hence, no choice but to drop.
2215 */
2216 duprintf("Dropping evil ICMP tinygram.\n");
2217 *par->hotdrop = true;
2218 return false;
2221 return icmp6_type_code_match(icmpinfo->type,
2222 icmpinfo->code[0],
2223 icmpinfo->code[1],
2224 ic->icmp6_type, ic->icmp6_code,
2225 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2228 /* Called when user tries to insert an entry of this type. */
2229 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2231 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2233 /* Must specify no unknown invflags */
2234 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
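/* Example: a rule asking for ICMPv6 type 128 (Echo Request) with code
 * range 0..0 reaches icmp6_match() as icmpinfo->type == 128 and
 * icmpinfo->code[] == {0, 0}; the rule then matches exactly echo requests,
 * or everything except them when IP6T_ICMP_INV is set. */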
2237 /* The built-in targets: standard (NULL) and error. */
2238 static struct xt_target ip6t_standard_target __read_mostly = {
2239 .name = IP6T_STANDARD_TARGET,
2240 .targetsize = sizeof(int),
2241 .family = AF_INET6,
2242 #ifdef CONFIG_COMPAT
2243 .compatsize = sizeof(compat_int_t),
2244 .compat_from_user = compat_standard_from_user,
2245 .compat_to_user = compat_standard_to_user,
2246 #endif
2249 static struct xt_target ip6t_error_target __read_mostly = {
2250 .name = IP6T_ERROR_TARGET,
2251 .target = ip6t_error,
2252 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2253 .family = AF_INET6,
2256 static struct nf_sockopt_ops ip6t_sockopts = {
2257 .pf = PF_INET6,
2258 .set_optmin = IP6T_BASE_CTL,
2259 .set_optmax = IP6T_SO_SET_MAX+1,
2260 .set = do_ip6t_set_ctl,
2261 #ifdef CONFIG_COMPAT
2262 .compat_set = compat_do_ip6t_set_ctl,
2263 #endif
2264 .get_optmin = IP6T_BASE_CTL,
2265 .get_optmax = IP6T_SO_GET_MAX+1,
2266 .get = do_ip6t_get_ctl,
2267 #ifdef CONFIG_COMPAT
2268 .compat_get = compat_do_ip6t_get_ctl,
2269 #endif
2270 .owner = THIS_MODULE,
2273 static struct xt_match icmp6_matchstruct __read_mostly = {
2274 .name = "icmp6",
2275 .match = icmp6_match,
2276 .matchsize = sizeof(struct ip6t_icmp),
2277 .checkentry = icmp6_checkentry,
2278 .proto = IPPROTO_ICMPV6,
2279 .family = AF_INET6,
2282 static int __net_init ip6_tables_net_init(struct net *net)
2284 return xt_proto_init(net, AF_INET6);
2287 static void __net_exit ip6_tables_net_exit(struct net *net)
2289 xt_proto_fini(net, AF_INET6);
2292 static struct pernet_operations ip6_tables_net_ops = {
2293 .init = ip6_tables_net_init,
2294 .exit = ip6_tables_net_exit,
2297 static int __init ip6_tables_init(void)
2299 int ret;
2301 ret = register_pernet_subsys(&ip6_tables_net_ops);
2302 if (ret < 0)
2303 goto err1;
2305 /* No one else will be downing the sem now, so we won't sleep */
2306 ret = xt_register_target(&ip6t_standard_target);
2307 if (ret < 0)
2308 goto err2;
2309 ret = xt_register_target(&ip6t_error_target);
2310 if (ret < 0)
2311 goto err3;
2312 ret = xt_register_match(&icmp6_matchstruct);
2313 if (ret < 0)
2314 goto err4;
2316 /* Register setsockopt */
2317 ret = nf_register_sockopt(&ip6t_sockopts);
2318 if (ret < 0)
2319 goto err5;
2321 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2322 return 0;
2324 err5:
2325 xt_unregister_match(&icmp6_matchstruct);
2326 err4:
2327 xt_unregister_target(&ip6t_error_target);
2328 err3:
2329 xt_unregister_target(&ip6t_standard_target);
2330 err2:
2331 unregister_pernet_subsys(&ip6_tables_net_ops);
2332 err1:
2333 return ret;
2336 static void __exit ip6_tables_fini(void)
2338 nf_unregister_sockopt(&ip6t_sockopts);
2340 xt_unregister_match(&icmp6_matchstruct);
2341 xt_unregister_target(&ip6t_error_target);
2342 xt_unregister_target(&ip6t_standard_target);
2344 unregister_pernet_subsys(&ip6_tables_net_ops);
2347 /*
2348 * find the offset to the specified header, or the protocol number of the
2349 * last header if target < 0. "Last header" means the transport protocol
2350 * header, ESP, or "No next header".
2352 * If the target header is found, its offset is stored in *offset and the
2353 * protocol number is returned. Otherwise a negative error code is returned.
2355 * If the first fragment doesn't contain the final protocol header or
2356 * NEXTHDR_NONE it is considered invalid.
2358 * Note that a non-first fragment is a special case: the "protocol number
2359 * of the last header" is the "next header" field in the Fragment header.
2360 * In this case *offset is meaningless and the fragment offset is stored
2361 * in *fragoff if fragoff isn't NULL.
2362 */
2364 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2365 int target, unsigned short *fragoff)
2367 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2368 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2369 unsigned int len = skb->len - start;
2371 if (fragoff)
2372 *fragoff = 0;
2374 while (nexthdr != target) {
2375 struct ipv6_opt_hdr _hdr, *hp;
2376 unsigned int hdrlen;
2378 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2379 if (target < 0)
2380 break;
2381 return -ENOENT;
2384 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2385 if (hp == NULL)
2386 return -EBADMSG;
2387 if (nexthdr == NEXTHDR_FRAGMENT) {
2388 unsigned short _frag_off;
2389 __be16 *fp;
2390 fp = skb_header_pointer(skb,
2391 start+offsetof(struct frag_hdr,
2392 frag_off),
2393 sizeof(_frag_off),
2394 &_frag_off);
2395 if (fp == NULL)
2396 return -EBADMSG;
2398 _frag_off = ntohs(*fp) & ~0x7;
2399 if (_frag_off) {
2400 if (target < 0 &&
2401 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2402 hp->nexthdr == NEXTHDR_NONE)) {
2403 if (fragoff)
2404 *fragoff = _frag_off;
2405 return hp->nexthdr;
2407 return -ENOENT;
2409 hdrlen = 8;
2410 } else if (nexthdr == NEXTHDR_AUTH)
2411 hdrlen = (hp->hdrlen + 2) << 2;
2412 else
2413 hdrlen = ipv6_optlen(hp);
2415 nexthdr = hp->nexthdr;
2416 len -= hdrlen;
2417 start += hdrlen;
2420 *offset = start;
2421 return nexthdr;
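/* Usage sketch: this is essentially how ip6_packet_match() above locates
 * the transport header of a packet (skb here stands for any sk_buff). */
#if 0
	unsigned int thoff = 0;
	unsigned short fragoff = 0;
	int protohdr = ipv6_find_hdr(skb, &thoff, -1, &fragoff);

	if (protohdr == IPPROTO_TCP && fragoff == 0)
		/* a TCP header starts at offset thoff within skb */;
#endif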
2424 EXPORT_SYMBOL(ip6t_register_table);
2425 EXPORT_SYMBOL(ip6t_unregister_table);
2426 EXPORT_SYMBOL(ip6t_do_table);
2427 EXPORT_SYMBOL(ip6t_ext_hdr);
2428 EXPORT_SYMBOL(ipv6_find_hdr);
2430 module_init(ip6_tables_init);
2431 module_exit(ip6_tables_fini);