[IPV6]: Per-interface statistics support.
[linux-2.6/mini2440.git] net/ipv6/icmp.c
blob 52cca93ff2f82fd22e460e9aecb33e78e559a47e
/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
 *
 *	Based on net/ipv4/icmp.c
 *
 *	RFC 1885
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Changes:
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen			add rate limits. never reply to an icmp.
 *					add more length checks and other fixes.
 *	yoshfuji		:	ensure to send parameter problem for
 *					fragments.
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	Randy Dunlap and
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/icmp.h>

#include <asm/uaccess.h>
#include <asm/system.h>
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;

/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
#define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
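/*
 * Note: the per-cpu socket above is only safe to use via the
 * icmpv6_xmit_lock()/icmpv6_xmit_unlock() helpers below, which keep
 * bottom halves disabled while the socket lock is held.
 */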
static int icmpv6_rcv(struct sk_buff **pskb);

static struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.flags		=	INET6_PROTO_FINAL,
};
static __inline__ int icmpv6_xmit_lock(void)
{
	local_bh_disable();

	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
		/* This can happen if the output path (e.g. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return 1;
	}
	return 0;
}

static __inline__ void icmpv6_xmit_unlock(void)
{
	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
}
/*
 * Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
	kfree_skb(skb);
}
/*
 * Figure out whether we may reply to this packet with an icmp error.
 *
 * We do not reply if:
 *	- it was an icmp error message.
 *	- it is truncated, so that it is known that the protocol is ICMPV6
 *	  (i.e. in the middle of some exthdr)
 *
 *	--ANK (980726)
 */

static int is_ineligible(struct sk_buff *skb)
{
	int ptr = (u8 *)(skb->nh.ipv6h + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = skb->nh.ipv6h->nexthdr;

	if (len < 0)
		return 1;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
	if (ptr < 0)
		return 0;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr + offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		if (tp == NULL ||
		    !(*tp & ICMPV6_INFOMSG_MASK))
			return 1;
	}
	return 0;
}
static int sysctl_icmpv6_time __read_mostly = 1*HZ;
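/*
 * sysctl_icmpv6_time is the minimum interval, in jiffies, between rate
 * limited ICMPv6 messages to the same destination (default: one second).
 * It is exported as the "ratelimit" sysctl in ipv6_icmp_table below.
 */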
/*
 * Check the ICMP output rate limit
 */
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
				     struct flowi *fl)
{
	struct dst_entry *dst;
	int res = 0;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return 1;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return 1;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(sk, fl);
	if (dst->error) {
		IP6_INC_STATS(ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags & IFF_LOOPBACK)) {
		res = 1;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = sysctl_icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen) >> 5);

		res = xrlim_allow(dst, tmo);
	}
	dst_release(dst);
	return res;
}
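/*
 * Example of the prefix-width scaling above: with the default limit of
 * 1*HZ, errors towards a /64 destination use tmo >>= (128 - 64) >> 5 == 2,
 * i.e. HZ/4, while a default route (/0) ends up with HZ/16.
 */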
/*
 * An inline helper for the "simple" if statement below:
 * checks whether a parameter problem report was caused by an
 * unrecognized IPv6 option whose Option Type has its two
 * highest-order bits set to 10.
 */

static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb->nh.raw - skb->data;
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return 1;
	return (*op & 0xC0) == 0x80;
}
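/*
 * Per RFC 2460 4.2, an unrecognized option whose type has high-order bits
 * 10 must trigger a Parameter Problem, Code 2, even when the packet was
 * sent to a multicast address, which is why icmpv6_send() below makes an
 * exception for opt_unrec() in its destination address check.
 */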
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	icmp6h = (struct icmp6hdr *) skb->h.raw;
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial((char *)icmp6h,
					 sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
						      &fl->fl6_dst,
						      len, fl->proto,
						      skb->csum);
	} else {
		u32 tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		tmp_csum = csum_ipv6_magic(&fl->fl6_src,
					   &fl->fl6_dst,
					   len, fl->proto, tmp_csum);
		icmp6h->icmp6_cksum = tmp_csum;
	}
	if (icmp6h->icmp6_cksum == 0)
		icmp6h->icmp6_cksum = -1;
	ip6_push_pending_frames(sk);
out:
	return err;
}
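/*
 * The checksum built above covers the icmp6 header, every fragment queued
 * on the socket write queue, and the IPv6 pseudo-header added by
 * csum_ipv6_magic(), as ICMPv6 requires.
 */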
struct icmpv6_msg {
	struct sk_buff	*skb;
	int		offset;
	uint8_t		type;
};
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__u32 csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}
#ifdef CONFIG_IPV6_MIP6
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)(skb->nh.raw + off);
			ipv6_addr_copy(&tmp, &iph->saddr);
			ipv6_addr_copy(&iph->saddr, &hao->addr);
			ipv6_addr_copy(&hao->addr, &tmp);
		}
	}
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
/*
 *	Send an ICMP message in response to a packet in error
 */
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
		 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct sock *sk;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	int iif = 0;
	int addr_type = 0;
	int len;
	int hlimit, tclass;
	int err = 0;

	if ((u8 *)hdr < skb->head || (u8 *)(hdr + 1) > skb->tail)
		return;
	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmpv6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
		saddr = &hdr->daddr;

	/*
	 *	Dest addr check
	 */

	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 *	Source addr check
	 */

	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
		return;
	}

	/*
	 *	Never answer to an ICMP packet.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
		return;
	}
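	/*
	 * Mobile IPv6: if the offending packet carried a Home Address
	 * destination option, mip6_addr_swap() substitutes the home address
	 * for the care-of source address, so the error built below is
	 * addressed to the mobile node's home address.
	 */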
	mip6_addr_swap(skb);

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = iif;
	fl.fl_icmp_type = type;
	fl.fl_icmp_code = code;
	security_skb_classify_flow(skb, &fl);

	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;

	/*
	 *	We won't send icmp if the destination is known
	 *	anycast.
	 */
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
		goto out_dst_release;
	}

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	msg.skb = skb;
	msg.offset = skb->nh.raw - skb->data;
	msg.type = type;

	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;
	}
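	/*
	 * The clamp above keeps the quoted payload plus the IPv6 and ICMPv6
	 * headers within IPV6_MIN_MTU (1280 bytes), per RFC 2463 2.4(c), so
	 * the error message itself never needs to be fragmented.
	 */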
	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      hlimit, tclass, NULL, &fl, (struct rt6_info *)dst,
			      MSG_DONTWAIT);
	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	int err = 0;
	int hlimit;
	int tclass;

	saddr = &skb->nh.ipv6h->daddr;

	if (!ipv6_unicast_destination(skb))
		saddr = NULL;
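	/*
	 * If the echo request was not sent to one of our unicast addresses
	 * (e.g. it was multicast), do not echo that destination back as our
	 * source; leaving fl6_src unset lets source address selection pick
	 * a suitable address during the route lookup.
	 */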
	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = skb->dev->ifindex;
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, &fl);

	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	idev = in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
			      (struct rt6_info *)dst, MSG_DONTWAIT);

	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
{
	struct in6_addr *saddr, *daddr;
	struct inet6_protocol *ipprot;
	struct sock *sk;
	int inner_offset;
	int hash;
	u8 nexthdr;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if (inner_offset < 0)
			return;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Check header including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))
		return;

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not be able, e.g., to make source routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */
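	/*
	 * MAX_INET_PROTOS is a power of two (256 in this tree), so the
	 * "hash" below is simply the next-header protocol number.
	 */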
	hash = nexthdr & (MAX_INET_PROTOS - 1);

	rcu_read_lock();
	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();

	read_lock(&raw_v6_lock);
	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
		while ((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
					     IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_lock);
}
/*
 *	Handle icmp messages
 */

static int icmpv6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;
	int type;

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;

	/* Perform checksum. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				     skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
					     IPPROTO_ICMPV6, 0);
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
				       NIP6(*saddr), NIP6(*daddr));
			goto discard_it;
		}
	}

	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
		goto discard_it;

	hdr = (struct icmp6hdr *) skb->h.raw;

	type = hdr->icmp6_type;

	if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
	else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
		ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
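	/*
	 * The per-type counters bumped above rely on struct icmpv6_mib
	 * laying out its fields in the same order as the ICMPv6 type codes,
	 * so (type - base) indexes the matching per-interface counter.
	 */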
	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   the standard destination cache. It seems only the
		   "advanced" destination cache will allow solving this
		   problem --ANK (980726)
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = (struct icmp6hdr *) skb->h.raw;
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		/*
		 *	Drop through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	kfree_skb(skb);
	return 0;

discard_it:
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
	kfree_skb(skb);
	return 0;
}
/*
 * Special lock-class for __icmpv6_socket:
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;

int __init icmpv6_init(struct net_proto_family *ops)
{
	struct sock *sk;
	int err, i, j;

	for_each_possible_cpu(i) {
		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
				       &per_cpu(__icmpv6_socket, i));
		if (err < 0) {
			printk(KERN_ERR
			       "Failed to initialize the ICMP6 control socket "
			       "(err %d).\n",
			       err);
			goto fail;
		}

		sk = per_cpu(__icmpv6_socket, i)->sk;
		sk->sk_allocation = GFP_ATOMIC;
		/*
		 * Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_socket (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		 */
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf =
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
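		/* Keep this kernel-internal socket out of the raw socket
		 * hash, so incoming ICMPv6 packets are never delivered to it.
		 */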
		sk->sk_prot->unhash(sk);
	}

	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
		err = -EAGAIN;
		goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!cpu_possible(j))
			continue;
		sock_release(per_cpu(__icmpv6_socket, j));
	}

	return err;
}
void icmpv6_cleanup(void)
{
	int i;

	for_each_possible_cpu(i) {
		sock_release(per_cpu(__icmpv6_socket, i));
	}
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
static const struct icmp6_err {
	int err;
	int fatal;
} tab_unreach[] = {
	{	/* NOROUTE */
		.err	= ENETUNREACH,
		.fatal	= 0,
	},
	{	/* ADM_PROHIBITED */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* Was NOT_NEIGHBOUR, now reserved */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* ADDR_UNREACH	*/
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* PORT_UNREACH	*/
		.err	= ECONNREFUSED,
		.fatal	= 1,
	},
};
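/*
 * Example: a Destination Unreachable with code ICMPV6_PORT_UNREACH (4)
 * is converted below to ECONNREFUSED and flagged fatal, which is how a
 * closed UDP port is eventually reported to the sending application.
 */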
int icmpv6_err_convert(int type, int code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		fatal = 1;
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		*err = EPROTO;
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}
#ifdef CONFIG_SYSCTL
ctl_table ipv6_icmp_table[] = {
	{
		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
		.procname	= "ratelimit",
		.data		= &sysctl_icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{ .ctl_name = 0 },
};
#endif