bonding / ipv6: no addrconf for slaves separately from master.
[tomato.git] / release / src-rt / linux / linux-2.6 / net / ipv6 / icmp.c
blob023438b66b55eb1e6a6a05853c163daa4c0a556c
1 /*
2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
10 * Based on net/ipv4/icmp.c
12 * RFC 1885
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
21 * Changes:
23 * Andi Kleen : exception handling
24 * Andi Kleen add rate limits. never reply to a icmp.
25 * add more length checks and other fixes.
26 * yoshfuji : ensure to sent parameter problem for
27 * fragments.
28 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
29 * Randy Dunlap and
30 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
31 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
38 #include <linux/in.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
46 #ifdef CONFIG_SYSCTL
47 #include <linux/sysctl.h>
48 #endif
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
54 #include <net/ip.h>
55 #include <net/sock.h>
57 #include <net/ipv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
60 #include <net/raw.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
65 #include <net/icmp.h>
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
/* Global ICMPv6 SNMP statistics (per-cpu MIB pair); exported so other
 * IPv6 modules can account messages against it. */
70 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 EXPORT_SYMBOL(icmpv6_statistics);
74 * The ICMP socket(s). This is the most convenient way to flow control
75 * our ICMP output as well as maintain a clean interface throughout
76 * all layers. All Socketless IP sends will soon be gone.
78 * On SMP we have one ICMP socket per-cpu.
/* Per-cpu kernel control socket used to build and transmit ICMPv6. */
80 static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
/* Shorthand for the current CPU's control socket. */
81 #define icmpv6_socket __get_cpu_var(__icmpv6_socket)
83 static int icmpv6_rcv(struct sk_buff *skb);
/* Registration record for IPPROTO_ICMPV6; INET6_PROTO_FINAL marks this
 * as a terminal handler in the inet6 receive chain. */
85 static struct inet6_protocol icmpv6_protocol = {
86 .handler = icmpv6_rcv,
87 .flags = INET6_PROTO_FINAL,
90 static __inline__ int icmpv6_xmit_lock(void)
92 local_bh_disable();
94 if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
95 /* This can happen if the output path (f.e. SIT or
96 * ip6ip6 tunnel) signals dst_link_failure() for an
97 * outgoing ICMP6 packet.
99 local_bh_enable();
100 return 1;
102 return 0;
105 static __inline__ void icmpv6_xmit_unlock(void)
107 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
111 * Slightly more convenient version of icmpv6_send.
113 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
115 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
116 kfree_skb(skb);
120 * Figure out, may we reply to this packet with icmp error.
122 * We do not reply, if:
123 * - it was icmp error message.
124 * - it is truncated, so that it is known, that protocol is ICMPV6
125 * (i.e. in the middle of some exthdr)
127 * --ANK (980726)
130 static int is_ineligible(struct sk_buff *skb)
132 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
133 int len = skb->len - ptr;
134 __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
136 if (len < 0)
137 return 1;
139 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
140 if (ptr < 0)
141 return 0;
142 if (nexthdr == IPPROTO_ICMPV6) {
143 u8 _type, *tp;
144 tp = skb_header_pointer(skb,
145 ptr+offsetof(struct icmp6hdr, icmp6_type),
146 sizeof(_type), &_type);
147 if (tp == NULL ||
148 !(*tp & ICMPV6_INFOMSG_MASK))
149 return 1;
151 return 0;
154 static int sysctl_icmpv6_time __read_mostly = 1*HZ;
157 * Check the ICMP output rate limit
159 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
160 struct flowi *fl)
162 struct dst_entry *dst;
163 int res = 0;
165 /* Informational messages are not limited. */
166 if (type & ICMPV6_INFOMSG_MASK)
167 return 1;
169 /* Do not limit pmtu discovery, it would break it. */
170 if (type == ICMPV6_PKT_TOOBIG)
171 return 1;
174 * Look up the output route.
175 * XXX: perhaps the expire for routing entries cloned by
176 * this lookup should be more aggressive (not longer than timeout).
178 dst = ip6_route_output(sk, fl);
179 if (dst->error) {
180 IP6_INC_STATS(ip6_dst_idev(dst),
181 IPSTATS_MIB_OUTNOROUTES);
182 } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
183 res = 1;
184 } else {
185 struct rt6_info *rt = (struct rt6_info *)dst;
186 int tmo = sysctl_icmpv6_time;
188 /* Give more bandwidth to wider prefixes. */
189 if (rt->rt6i_dst.plen < 128)
190 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
192 res = xrlim_allow(dst, tmo);
194 dst_release(dst);
195 return res;
199 * an inline helper for the "simple" if statement below
200 * checks if parameter problem report is caused by an
201 * unrecognized IPv6 option that has the Option Type
202 * highest-order two bits set to 10
205 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
207 u8 _optval, *op;
209 offset += skb_network_offset(skb);
210 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
211 if (op == NULL)
212 return 1;
213 return (*op & 0xC0) == 0x80;
216 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
218 struct sk_buff *skb;
219 struct icmp6hdr *icmp6h;
220 int err = 0;
222 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
223 goto out;
225 icmp6h = icmp6_hdr(skb);
226 memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
227 icmp6h->icmp6_cksum = 0;
229 if (skb_queue_len(&sk->sk_write_queue) == 1) {
230 skb->csum = csum_partial((char *)icmp6h,
231 sizeof(struct icmp6hdr), skb->csum);
232 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
233 &fl->fl6_dst,
234 len, fl->proto,
235 skb->csum);
236 } else {
237 __wsum tmp_csum = 0;
239 skb_queue_walk(&sk->sk_write_queue, skb) {
240 tmp_csum = csum_add(tmp_csum, skb->csum);
243 tmp_csum = csum_partial((char *)icmp6h,
244 sizeof(struct icmp6hdr), tmp_csum);
245 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
246 &fl->fl6_dst,
247 len, fl->proto,
248 tmp_csum);
250 ip6_push_pending_frames(sk);
251 out:
252 return err;
/* Argument bundle handed from icmpv6_send()/icmpv6_echo_reply() to
 * icmpv6_getfrag() through ip6_append_data(). */
255 struct icmpv6_msg {
256 struct sk_buff *skb; /* offending/original packet being quoted */
257 int offset; /* where in skb the quoted data starts */
258 uint8_t type; /* ICMPv6 message type being generated */
261 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
263 struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
264 struct sk_buff *org_skb = msg->skb;
265 __wsum csum = 0;
267 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
268 to, len, csum);
269 skb->csum = csum_block_add(skb->csum, csum, odd);
270 if (!(msg->type & ICMPV6_INFOMSG_MASK))
271 nf_ct_attach(skb, org_skb);
272 return 0;
275 #ifdef CONFIG_IPV6_MIP6
/* Mobile IPv6: if the packet carried a Home Address Option, swap the
 * care-of address in the IPv6 source field with the home address from
 * the HAO so the error is built against the home address. */
276 static void mip6_addr_swap(struct sk_buff *skb)
278 struct ipv6hdr *iph = ipv6_hdr(skb);
279 struct inet6_skb_parm *opt = IP6CB(skb);
280 struct ipv6_destopt_hao *hao;
281 struct in6_addr tmp;
282 int off;
284 if (opt->dsthao) {
285 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
286 if (likely(off >= 0)) {
287 hao = (struct ipv6_destopt_hao *)
288 (skb_network_header(skb) + off);
/* Three-way swap of saddr and the HAO address. */
289 ipv6_addr_copy(&tmp, &iph->saddr);
290 ipv6_addr_copy(&iph->saddr, &hao->addr);
291 ipv6_addr_copy(&hao->addr, &tmp);
295 #else
/* No Mobile IPv6 support: nothing to swap. */
296 static inline void mip6_addr_swap(struct sk_buff *skb) {}
297 #endif
300 * Send an ICMP message in response to a packet in error
302 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
303 struct net_device *dev)
305 struct inet6_dev *idev = NULL;
306 struct ipv6hdr *hdr = ipv6_hdr(skb);
307 struct sock *sk;
308 struct ipv6_pinfo *np;
309 struct in6_addr *saddr = NULL;
310 struct dst_entry *dst;
311 struct icmp6hdr tmp_hdr;
312 struct flowi fl;
313 struct icmpv6_msg msg;
314 int iif = 0;
315 int addr_type = 0;
316 int len;
317 int hlimit, tclass;
318 int err = 0;
/* Sanity: the IPv6 header must lie entirely inside the skb data. */
320 if ((u8 *)hdr < skb->head ||
321 (skb->network_header + sizeof(*hdr)) > skb->tail)
322 return;
325 * Make sure we respect the rules
326 * i.e. RFC 1885 2.4(e)
327 * Rule (e.1) is enforced by not using icmpv6_send
328 * in any code that processes icmp errors.
330 addr_type = ipv6_addr_type(&hdr->daddr);
/* If the offending packet was addressed to one of our own addresses,
 * reuse that address as the source of the error. */
332 if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
333 saddr = &hdr->daddr;
336 * Dest addr check
/* Multicast destinations / non-host packets: only PKT_TOOBIG and
 * mandatory unknown-option reports are allowed (RFC 2463 2.4(e)). */
339 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
340 if (type != ICMPV6_PKT_TOOBIG &&
341 !(type == ICMPV6_PARAMPROB &&
342 code == ICMPV6_UNK_OPTION &&
343 (opt_unrec(skb, info))))
344 return;
346 saddr = NULL;
349 addr_type = ipv6_addr_type(&hdr->saddr);
352 * Source addr check
355 if (addr_type & IPV6_ADDR_LINKLOCAL)
356 iif = skb->dev->ifindex;
359 * Must not send error if the source does not uniquely
360 * identify a single node (RFC2463 Section 2.4).
361 * We check unspecified / multicast addresses here,
362 * and anycast addresses will be checked later.
364 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
365 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
366 return;
370 * Never answer to a ICMP packet.
372 if (is_ineligible(skb)) {
373 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
374 return;
/* Mobile IPv6: swap home/care-of addresses before building the reply. */
377 mip6_addr_swap(skb);
/* Build the flow back toward the offending packet's source. */
379 memset(&fl, 0, sizeof(fl));
380 fl.proto = IPPROTO_ICMPV6;
381 ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
382 if (saddr)
383 ipv6_addr_copy(&fl.fl6_src, saddr);
384 fl.oif = iif;
385 fl.fl_icmp_type = type;
386 fl.fl_icmp_code = code;
387 security_skb_classify_flow(skb, &fl);
/* Bail out if the per-cpu socket is already locked (re-entry). */
389 if (icmpv6_xmit_lock())
390 return;
392 sk = icmpv6_socket->sk;
393 np = inet6_sk(sk);
395 if (!icmpv6_xrlim_allow(sk, type, &fl))
396 goto out;
/* Template header; checksum is filled in by push_pending_frames. */
398 tmp_hdr.icmp6_type = type;
399 tmp_hdr.icmp6_code = code;
400 tmp_hdr.icmp6_cksum = 0;
401 tmp_hdr.icmp6_pointer = htonl(info);
403 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
404 fl.oif = np->mcast_oif;
406 err = ip6_dst_lookup(sk, &dst, &fl);
407 if (err)
408 goto out;
411 * We won't send icmp if the destination is known
412 * anycast.
414 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
415 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
416 goto out_dst_release;
/* NOTE(review): "goto out" here assumes xfrm_lookup() drops dst itself
 * on failure -- confirm against this kernel's xfrm implementation. */
419 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
420 goto out;
/* Hop limit: socket setting, else route metric, else device default. */
422 if (ipv6_addr_is_multicast(&fl.fl6_dst))
423 hlimit = np->mcast_hops;
424 else
425 hlimit = np->hop_limit;
426 if (hlimit < 0)
427 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
428 if (hlimit < 0)
429 hlimit = ipv6_get_hoplimit(dst->dev);
431 tclass = np->tclass;
432 if (tclass < 0)
433 tclass = 0;
435 msg.skb = skb;
436 msg.offset = skb_network_offset(skb);
437 msg.type = type;
/* Quote at most a minimum-MTU packet's worth of the offender. */
439 len = skb->len - msg.offset;
440 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
441 if (len < 0) {
442 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
443 goto out_dst_release;
446 idev = in6_dev_get(skb->dev);
448 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
449 len + sizeof(struct icmp6hdr),
450 sizeof(struct icmp6hdr),
451 hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
452 MSG_DONTWAIT);
453 if (err) {
454 ip6_flush_pending_frames(sk);
455 goto out_put;
457 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
/* Account the outgoing message in the per-interface MIB. */
459 if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
460 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
461 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
463 out_put:
464 if (likely(idev != NULL))
465 in6_dev_put(idev);
466 out_dst_release:
467 dst_release(dst);
468 out:
469 icmpv6_xmit_unlock();
472 EXPORT_SYMBOL(icmpv6_send);
/* Build and send an ICMPv6 Echo Reply for the Echo Request in @skb. */
474 static void icmpv6_echo_reply(struct sk_buff *skb)
476 struct sock *sk;
477 struct inet6_dev *idev;
478 struct ipv6_pinfo *np;
479 struct in6_addr *saddr = NULL;
480 struct icmp6hdr *icmph = icmp6_hdr(skb);
481 struct icmp6hdr tmp_hdr;
482 struct flowi fl;
483 struct icmpv6_msg msg;
484 struct dst_entry *dst;
485 int err = 0;
486 int hlimit;
487 int tclass;
/* Reply from the address the request was sent to, unless it was not
 * a unicast destination (then let routing pick the source). */
489 saddr = &ipv6_hdr(skb)->daddr;
491 if (!ipv6_unicast_destination(skb))
492 saddr = NULL;
/* Echo reply header = echo request header with the type changed. */
494 memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
495 tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
497 memset(&fl, 0, sizeof(fl));
498 fl.proto = IPPROTO_ICMPV6;
499 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
500 if (saddr)
501 ipv6_addr_copy(&fl.fl6_src, saddr);
502 fl.oif = skb->dev->ifindex;
503 fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
504 security_skb_classify_flow(skb, &fl);
/* Bail out if the per-cpu socket is already locked (re-entry). */
506 if (icmpv6_xmit_lock())
507 return;
509 sk = icmpv6_socket->sk;
510 np = inet6_sk(sk);
512 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
513 fl.oif = np->mcast_oif;
515 err = ip6_dst_lookup(sk, &dst, &fl);
516 if (err)
517 goto out;
/* NOTE(review): "goto out" assumes xfrm_lookup() drops dst on failure
 * -- confirm against this kernel's xfrm implementation. */
518 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
519 goto out;
/* Hop limit: socket setting, else route metric, else device default. */
521 if (ipv6_addr_is_multicast(&fl.fl6_dst))
522 hlimit = np->mcast_hops;
523 else
524 hlimit = np->hop_limit;
525 if (hlimit < 0)
526 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
527 if (hlimit < 0)
528 hlimit = ipv6_get_hoplimit(dst->dev);
530 tclass = np->tclass;
531 if (tclass < 0)
532 tclass = 0;
534 idev = in6_dev_get(skb->dev);
/* Echo the request's payload back verbatim from offset 0. */
536 msg.skb = skb;
537 msg.offset = 0;
538 msg.type = ICMPV6_ECHO_REPLY;
540 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
541 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
542 (struct rt6_info*)dst, MSG_DONTWAIT);
544 if (err) {
545 ip6_flush_pending_frames(sk);
546 goto out_put;
548 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
550 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
551 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
553 out_put:
554 if (likely(idev != NULL))
555 in6_dev_put(idev);
556 dst_release(dst);
557 out:
558 icmpv6_xmit_unlock();
/* Propagate a received ICMPv6 error to the upper-layer protocol whose
 * packet triggered it, and to any matching raw sockets. */
561 static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
563 struct in6_addr *saddr, *daddr;
564 struct inet6_protocol *ipprot;
565 struct sock *sk;
566 int inner_offset;
567 int hash;
568 u8 nexthdr;
/* The quoted packet must at least contain a full IPv6 header. */
570 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
571 return;
573 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
574 if (ipv6_ext_hdr(nexthdr)) {
575 /* now skip over extension headers */
576 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
577 if (inner_offset<0)
578 return;
579 } else {
580 inner_offset = sizeof(struct ipv6hdr);
583 /* Checkin header including 8 bytes of inner protocol header. */
584 if (!pskb_may_pull(skb, inner_offset+8))
585 return;
587 saddr = &ipv6_hdr(skb)->saddr;
588 daddr = &ipv6_hdr(skb)->daddr;
590 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
591 Without this we will not able f.e. to make source routed
592 pmtu discovery.
593 Corresponding argument (opt) to notifiers is already added.
594 --ANK (980726)
597 hash = nexthdr & (MAX_INET_PROTOS - 1);
/* Deliver to the registered inet6 protocol's error handler. */
599 rcu_read_lock();
600 ipprot = rcu_dereference(inet6_protos[hash]);
601 if (ipprot && ipprot->err_handler)
602 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
603 rcu_read_unlock();
/* And to every raw socket bound to this protocol/address pair. */
605 read_lock(&raw_v6_lock);
606 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
607 while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr,
608 IP6CB(skb)->iif))) {
609 rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
610 sk = sk_next(sk);
613 read_unlock(&raw_v6_lock);
617 * Handle icmp messages
/* Main ICMPv6 input routine: verify the checksum, account the message,
 * and dispatch on its type.  Always consumes @skb; returns 0. */
620 static int icmpv6_rcv(struct sk_buff *skb)
622 struct net_device *dev = skb->dev;
623 struct inet6_dev *idev = __in6_dev_get(dev);
624 struct in6_addr *saddr, *daddr;
625 struct ipv6hdr *orig_hdr;
626 struct icmp6hdr *hdr;
627 int type;
629 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
631 saddr = &ipv6_hdr(skb)->saddr;
632 daddr = &ipv6_hdr(skb)->daddr;
634 /* Perform checksum. */
635 switch (skb->ip_summed) {
636 case CHECKSUM_COMPLETE:
637 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
638 skb->csum))
639 break;
640 /* fall through */
641 case CHECKSUM_NONE:
642 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
643 IPPROTO_ICMPV6, 0));
644 if (__skb_checksum_complete(skb)) {
645 LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
646 NIP6(*saddr), NIP6(*daddr));
647 goto discard_it;
/* Strip the ICMPv6 header so handlers see the payload. */
651 if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
652 goto discard_it;
654 hdr = icmp6_hdr(skb);
656 type = hdr->icmp6_type;
/* Per-type input accounting in the interface MIB. */
658 if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
659 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
660 else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
661 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
663 switch (type) {
664 case ICMPV6_ECHO_REQUEST:
665 icmpv6_echo_reply(skb);
666 break;
668 case ICMPV6_ECHO_REPLY:
669 /* we couldn't care less */
670 break;
672 case ICMPV6_PKT_TOOBIG:
673 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
674 standard destination cache. Seems, only "advanced"
675 destination cache will allow to solve this problem
676 --ANK (980726)
/* The quoted IPv6 header carries the addresses to update PMTU for. */
678 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
679 goto discard_it;
680 hdr = icmp6_hdr(skb);
681 orig_hdr = (struct ipv6hdr *) (hdr + 1);
682 rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
683 ntohl(hdr->icmp6_mtu));
686 * Drop through to notify
689 case ICMPV6_DEST_UNREACH:
690 case ICMPV6_TIME_EXCEED:
691 case ICMPV6_PARAMPROB:
692 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
693 break;
695 case NDISC_ROUTER_SOLICITATION:
696 case NDISC_ROUTER_ADVERTISEMENT:
697 case NDISC_NEIGHBOUR_SOLICITATION:
698 case NDISC_NEIGHBOUR_ADVERTISEMENT:
699 case NDISC_REDIRECT:
700 ndisc_rcv(skb);
701 break;
703 case ICMPV6_MGM_QUERY:
704 igmp6_event_query(skb);
705 break;
707 case ICMPV6_MGM_REPORT:
708 igmp6_event_report(skb);
709 break;
/* Known types we deliberately ignore here. */
711 case ICMPV6_MGM_REDUCTION:
712 case ICMPV6_NI_QUERY:
713 case ICMPV6_NI_REPLY:
714 case ICMPV6_MLD2_REPORT:
715 case ICMPV6_DHAAD_REQUEST:
716 case ICMPV6_DHAAD_REPLY:
717 case ICMPV6_MOBILE_PREFIX_SOL:
718 case ICMPV6_MOBILE_PREFIX_ADV:
719 break;
721 default:
722 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
724 /* informational */
725 if (type & ICMPV6_INFOMSG_MASK)
726 break;
729 * error of unknown type.
730 * must pass to upper level
733 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
736 kfree_skb(skb);
737 return 0;
739 discard_it:
740 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
741 kfree_skb(skb);
742 return 0;
746 * Special lock-class for __icmpv6_socket:
748 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/* Create one kernel ICMPv6 control socket per possible CPU and register
 * the ICMPv6 protocol handler.  On failure, releases the sockets already
 * created and returns a negative errno. */
750 int __init icmpv6_init(struct net_proto_family *ops)
752 struct sock *sk;
753 int err, i, j;
755 for_each_possible_cpu(i) {
756 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
757 &per_cpu(__icmpv6_socket, i));
758 if (err < 0) {
759 printk(KERN_ERR
760 "Failed to initialize the ICMP6 control socket "
761 "(err %d).\n",
762 err);
763 goto fail;
766 sk = per_cpu(__icmpv6_socket, i)->sk;
767 sk->sk_allocation = GFP_ATOMIC;
769 * Split off their lock-class, because sk->sk_dst_lock
770 * gets used from softirqs, which is safe for
771 * __icmpv6_socket (because those never get directly used
772 * via userspace syscalls), but unsafe for normal sockets.
774 lockdep_set_class(&sk->sk_dst_lock,
775 &icmpv6_socket_sk_dst_lock_key);
777 /* Enough space for 2 64K ICMP packets, including
778 * sk_buff struct overhead.
780 sk->sk_sndbuf =
781 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
/* Kernel-internal socket: keep it out of the hash tables. */
783 sk->sk_prot->unhash(sk);
787 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
788 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
789 err = -EAGAIN;
790 goto fail;
793 return 0;
/* Unwind: release every socket created before the failure at cpu i. */
795 fail:
796 for (j = 0; j < i; j++) {
797 if (!cpu_possible(j))
798 continue;
799 sock_release(per_cpu(__icmpv6_socket, j));
802 return err;
805 void icmpv6_cleanup(void)
807 int i;
809 for_each_possible_cpu(i) {
810 sock_release(per_cpu(__icmpv6_socket, i));
812 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/* Map of ICMPV6_DEST_UNREACH codes (indexed by code) to an errno value
 * and a flag saying whether the error is fatal for the connection;
 * consumed by icmpv6_err_convert() below. */
815 static const struct icmp6_err {
816 int err;
817 int fatal;
818 } tab_unreach[] = {
819 { /* NOROUTE */
820 .err = ENETUNREACH,
821 .fatal = 0,
823 { /* ADM_PROHIBITED */
824 .err = EACCES,
825 .fatal = 1,
827 { /* Was NOT_NEIGHBOUR, now reserved */
828 .err = EHOSTUNREACH,
829 .fatal = 0,
831 { /* ADDR_UNREACH */
832 .err = EHOSTUNREACH,
833 .fatal = 0,
835 { /* PORT_UNREACH */
836 .err = ECONNREFUSED,
837 .fatal = 1,
839 { /* POLICY_FAIL */
840 .err = EACCES,
841 .fatal = 1,
843 { /* REJECT_ROUTE */
844 .err = EACCES,
845 .fatal = 1,
849 int icmpv6_err_convert(int type, int code, int *err)
851 int fatal = 0;
853 *err = EPROTO;
855 switch (type) {
856 case ICMPV6_DEST_UNREACH:
857 fatal = 1;
858 if (code < ARRAY_SIZE(tab_unreach)) {
859 *err = tab_unreach[code].err;
860 fatal = tab_unreach[code].fatal;
862 break;
864 case ICMPV6_PKT_TOOBIG:
865 *err = EMSGSIZE;
866 break;
868 case ICMPV6_PARAMPROB:
869 *err = EPROTO;
870 fatal = 1;
871 break;
873 case ICMPV6_TIME_EXCEED:
874 *err = EHOSTUNREACH;
875 break;
878 return fatal;
881 EXPORT_SYMBOL(icmpv6_err_convert);
883 #ifdef CONFIG_SYSCTL
/* sysctl table exposing net.ipv6.icmp.ratelimit, backed by
 * sysctl_icmpv6_time above. */
884 ctl_table ipv6_icmp_table[] = {
886 .ctl_name = NET_IPV6_ICMP_RATELIMIT,
887 .procname = "ratelimit",
888 .data = &sysctl_icmpv6_time,
889 .maxlen = sizeof(int),
890 .mode = 0644,
891 .proc_handler = &proc_dointvec
893 { .ctl_name = 0 },
895 #endif