/*
 *	RAW sockets for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>

#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static struct raw_hashinfo raw_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
		unsigned short num, struct in6_addr *loc_addr,
		struct in6_addr *rmt_addr, int dif)
{
	struct hlist_node *node;
	int is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk, node)
		if (inet_sk(sk)->inet_num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
/*
 *	0 means accept all LOCAL addresses here, not all the world...
 */
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
	struct icmp6hdr *icmph;
	struct raw6_sock *rp = raw6_sk(sk);

	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
		__u32 *data = &rp->filter.data[0];
		int bit_nr;

		icmph = (struct icmp6hdr *) skb->data;
		bit_nr = icmph->icmp6_type;

		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
	}
	return 0;
}
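/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the 8 x 32-bit words tested above are normally populated from
 * userspace with the RFC 3542 ICMP6_FILTER macros; a set bit means
 * "filter this type out".  Assuming the standard <netinet/icmp6.h>
 * definitions, a ping-style program that only wants Echo Replies
 * would do roughly:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct icmp6_filter filt;
 *
 *	ICMP6_FILTER_SETBLOCKALL(&filt);
 *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
 *
 * rawv6_seticmpfilter() below copies that bitmap into rp->filter, and
 * icmpv6_filter() above returns nonzero (drop) while the bit for
 * icmp6_type is still set.
 */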
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);

int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
					   struct sk_buff *skb))
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
					     struct sk_buff *skb))
{
	rcu_assign_pointer(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);

#endif
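/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a Mobility Header handler (the in-kernel MIPv6 code is the expected
 * user) hooks itself in roughly like this; the names my_mh_filter,
 * my_init and my_exit are hypothetical:
 *
 *	static int my_mh_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return 0;	(0 = deliver, negative = abort delivery)
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		return rawv6_mh_filter_register(my_mh_filter);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		rawv6_mh_filter_unregister(my_mh_filter);
 *	}
 *
 * The pointer is published with rcu_assign_pointer() so readers in
 * ipv6_raw_deliver() below can pick it up with rcu_dereference()
 * without taking a lock.
 */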
/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;
	struct sock *sk;
	int delivered = 0;
	__u8 hash;
	struct net *net;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = saddr + 1;

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);

	if (sk == NULL)
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		int filtered;

		delivered = 1;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPPROTO_MH:
		{
			/* XXX: To validate MH only once for each packet,
			 * this is placed here. It should be after checking
			 * xfrm policy, however it doesn't. The checking xfrm
			 * policy is placed in rawv6_rcv() because it is
			 * required for each socket.
			 */
			int (*filter)(struct sock *sock, struct sk_buff *skb);

			filter = rcu_dereference(mh_filter);
			filtered = filter ? filter(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone) {
				nf_reset(clone);
				rawv6_rcv(sk, clone);
			}
		}
		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
out:
	read_unlock(&raw_v6_hashinfo.lock);
	return delivered;
}
int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	struct sock *raw_sk;

	raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	return raw_sk != NULL;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	rcu_read_lock();
	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;

			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
			if (!dev)
				goto out_unlock;
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0))
				goto out_unlock;
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out_unlock:
	rcu_read_unlock();
out:
	release_sock(sk);
	return err;
}
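/*
 * Editor's note (illustrative sketch, not part of the original file):
 * from userspace, binding a raw socket to a link-local address has to
 * carry the interface in sin6_scope_id (or the socket must already be
 * bound to a device), matching the checks above.  "eth0" is a
 * hypothetical interface name:
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
 *	a.sin6_scope_id = if_nametoindex("eth0");
 *	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 *
 * Without the scope id this path fails with -EINVAL, and a
 * v4-mapped address is rejected with -EADDRNOTAVAIL.
 */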
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       u8 type, u8 code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard.
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct sock *sk;
	int hash;
	struct in6_addr *saddr, *daddr;
	struct net *net;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk != NULL) {
		/* Note: ipv6_hdr(skb) != skb->data */
		struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
					     IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code,
				  inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return 0;
}
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (inet->hdrincl) {
		if (skb_checksum_complete(skb)) {
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6->sin6_scope_id = IP6CB(skb)->iif;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (np->rxopt.all)
		datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	goto out;
}
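/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a minimal userspace receive loop against this path.  On an
 * IPPROTO_ICMPV6 socket the buffer starts at the ICMPv6 header (the
 * IPv6 header is not included), and for link-local senders
 * sin6_scope_id is filled with the arrival interface, as done above:
 *
 *	unsigned char buf[2048];
 *	struct sockaddr_in6 from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n;
 *
 *	n = recvfrom(fd, buf, sizeof(buf), 0,
 *		     (struct sockaddr *)&from, &fromlen);
 *	if (n >= (ssize_t)sizeof(struct icmp6_hdr)) {
 *		struct icmp6_hdr *hdr = (struct icmp6_hdr *)buf;
 *		printf("type %u, scope %u\n",
 *		       hdr->icmp6_type, from.sin6_scope_id);
 *	}
 *
 * struct icmp6_hdr comes from <netinet/icmp6.h>.
 */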
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
				     struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	__wsum tmp_csum;
	__sum16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) -
						skb->data);
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl->fl6_src,
			       &fl->fl6_dst,
			       total_len, fl->proto, tmp_csum);

	if (csum == 0 && fl->proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			struct flowi *fl, struct rt6_info *rt,
			unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags & MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, dst_clone(&rt->u.dst));

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !np->recverr)
		err = 0;
	return err;
}
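/*
 * Editor's note (illustrative sketch, not part of the original file):
 * this header-include path is reached from sockets created with
 * protocol IPPROTO_RAW, for which (in this kernel generation)
 * inet6_create() sets inet->hdrincl.  Userspace then supplies a
 * complete IPv6 packet; dst below is a hypothetical, already filled
 * sockaddr_in6:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
 *	struct {
 *		struct ip6_hdr	ip6;	(from <netinet/ip6.h>)
 *		unsigned char	payload[8];
 *	} pkt;
 *
 *	(fill in pkt.ip6 and pkt.payload by hand)
 *	sendto(fd, &pkt, sizeof(pkt), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * No checksum or fragmentation help is applied here; the buffer is
 * copied verbatim and handed to dst_output() through NF_HOOK.
 */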
static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	u8 len = 0;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return 0;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				if (get_user(fl->fl_icmp_type, type) ||
				    get_user(fl->fl_icmp_code, code))
					return -EFAULT;
				probed = 1;
			}
			break;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPPROTO_MH:
			if (iov->iov_base && iov->iov_len < 1)
				break;
			/* check if type field is readable or not. */
			if (iov->iov_len > 2 - len) {
				u8 __user *p = iov->iov_base;
				if (get_user(fl->fl_mh_type, &p[2 - len]))
					return -EFAULT;
				probed = 1;
			} else
				len += iov->iov_len;

			break;
#endif
		default:
			probed = 1;
			break;
		}
		if (probed)
			break;
	}
	return 0;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
		   struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p = NULL, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;
	int tclass = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl, 0, sizeof(fl));

	fl.mark = sk->sk_mark;

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->inet_num;
		else if (proto != inet->inet_num)
			return -EINVAL;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->inet_num;
		daddr = &np->daddr;
		fl.fl6_flowlabel = np->flow_label;
	}

	if (fl.oif == 0)
		fl.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl.proto = proto;
	err = rawv6_probe_proto_opt(&fl, msg);
	if (err)
		goto out;

	if (!ipv6_addr_any(daddr))
		ipv6_addr_copy(&fl.fl6_dst, daddr);
	else
		fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);

	/* merge ip6_build_xmit from ip6_output */
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;
	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto out;
	}

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl.fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = ip6_dst_hoplimit(dst);
	}

	if (tclass < 0)
		tclass = np->tclass;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (inet->hdrincl) {
		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info *)dst, msg->msg_flags);
	} else {
		lock_sock(sk);
		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
			len, 0, hlimit, tclass, opt, &fl, (struct rt6_info *)dst,
			msg->msg_flags);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl, rp);
		release_sock(sk);
	}
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	return err < 0 ? err : len;
do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
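/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the common non-hdrincl case above is what a ping6-style sender
 * exercises: the kernel builds the IPv6 header and, for ICMPv6
 * sockets, inserts the checksum in rawv6_push_pending_frames().
 * Per the "port is the proto value" comment, sin6_port must be 0 or
 * equal to the socket's own protocol, otherwise sendmsg() fails with
 * -EINVAL.  "2001:db8::1" is a placeholder destination:
 *
 *	struct icmp6_hdr echo = { .icmp6_type = ICMP6_ECHO_REQUEST };
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
 *
 *	echo.icmp6_id  = htons(1);
 *	echo.icmp6_seq = htons(1);
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	sendto(fd, &echo, sizeof(echo), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */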
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0)
			return -EINVAL;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
		    level == IPPROTO_IPV6) {
			/*
			 * RFC3542 tells that IPV6_CHECKSUM socket
			 * option in the IPPROTO_IPV6 level is not
			 * allowed on ICMPv6 sockets.
			 * If you want to set it, use IPPROTO_RAW
			 * level IPV6_CHECKSUM socket option
			 * (Linux extension).
			 */
			return -EINVAL;
		}

		/* You may get strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}

		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
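/*
 * Editor's note (illustrative sketch, not part of the original file):
 * for non-ICMPv6 raw sockets the kernel only fills in the
 * pseudo-header checksum if userspace says where the checksum field
 * lives.  An OSPFv3-style daemon (protocol 89, checksum at byte
 * offset 12 of its header) would enable it like this:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_OSPFIGP);
 *	int offset = 12;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));
 *
 * A negative value turns the service back off, and an odd positive
 * offset is rejected above.  On ICMPv6 sockets the option must be set
 * at the IPPROTO_RAW level (Linux extension); at IPPROTO_IPV6 it is
 * refused per RFC 3542.
 */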
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval,
				       optlen);
	}

	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, unsigned int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;
	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_setsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/*
		 * We allow getsockopt() for IPPROTO_IPV6-level
		 * IPV6_CHECKSUM socket option on ICMPv6 sockets
		 * since RFC3542 is silent about it.
		 */
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
*sk
, int level
, int optname
,
1080 char __user
*optval
, int __user
*optlen
)
1087 if (inet_sk(sk
)->inet_num
!= IPPROTO_ICMPV6
)
1089 return rawv6_geticmpfilter(sk
, level
, optname
, optval
,
1092 if (optname
== IPV6_CHECKSUM
)
1095 return ipv6_getsockopt(sk
, level
, optname
, optval
,
1099 return do_rawv6_getsockopt(sk
, level
, optname
, optval
, optlen
);
#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_RAW:
		break;
	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_getsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->tail - skb->transport_header;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
#ifdef CONFIG_IPV6_MROUTE
		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
		return -ENOIOCTLCMD;
#endif
	}
}
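/*
 * Editor's note (illustrative sketch, not part of the original file):
 * SIOCINQ reports the size of the next queued datagram (measured from
 * the transport header, as computed above) and SIOCOUTQ the bytes
 * still committed to the send side.  Both come from <linux/sockios.h>:
 *
 *	int pending;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("next datagram: %d bytes\n", pending);
 *	if (ioctl(fd, SIOCOUTQ, &pending) == 0)
 *		printf("unsent: %d bytes\n", pending);
 */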
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
		ip6_ra_control(sk, -1);
	ip6mr_sk_done(sk);
	sk_common_release(sk);
}
static void raw6_destroy(struct sock *sk)
{
	lock_sock(sk);
	ip6_flush_pending_frames(sk);
	release_sock(sk);

	inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
	struct raw6_sock *rp = raw6_sk(sk);

	switch (inet_sk(sk)->inet_num) {
	case IPPROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset   = 2;
		break;
	case IPPROTO_MH:
		rp->checksum = 1;
		rp->offset   = 4;
		break;
	default:
		break;
	}
	return 0;
}
struct proto rawv6_prot = {
	.name		   = "RAWv6",
	.owner		   = THIS_MODULE,
	.close		   = rawv6_close,
	.destroy	   = raw6_destroy,
	.connect	   = ip6_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = rawv6_ioctl,
	.init		   = rawv6_init_sk,
	.setsockopt	   = rawv6_setsockopt,
	.getsockopt	   = rawv6_getsockopt,
	.sendmsg	   = rawv6_sendmsg,
	.recvmsg	   = rawv6_recvmsg,
	.bind		   = rawv6_bind,
	.backlog_rcv	   = rawv6_rcv_skb,
	.hash		   = raw_hash_sk,
	.unhash		   = raw_unhash_sk,
	.obj_size	   = sizeof(struct raw6_sock),
	.h.raw_hash	   = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_rawv6_setsockopt,
	.compat_getsockopt = compat_rawv6_getsockopt,
#endif
};
#ifdef CONFIG_PROC_FS
static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = 0;
	srcp  = inet_sk(sp)->inet_num;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
*seq
, void *v
)
1246 if (v
== SEQ_START_TOKEN
)
1251 "st tx_queue rx_queue tr tm->when retrnsmt"
1252 " uid timeout inode ref pointer drops\n");
1254 raw6_sock_seq_show(seq
, v
, raw_seq_private(seq
)->bucket
);
static const struct seq_operations raw6_seq_ops = {
	.start =	raw_seq_start,
	.next =		raw_seq_next,
	.stop =		raw_seq_stop,
	.show =		raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
	return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}
static const struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_net,
};
static int raw6_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
		return -ENOMEM;

	return 0;
}

static void raw6_exit_net(struct net *net)
{
	proc_net_remove(net, "raw6");
}

static struct pernet_operations raw6_net_ops = {
	.init = raw6_init_net,
	.exit = raw6_exit_net,
};

int __init raw6_proc_init(void)
{
	return register_pernet_subsys(&raw6_net_ops);
}

void raw6_proc_exit(void)
{
	unregister_pernet_subsys(&raw6_net_ops);
}
#endif	/* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll.  */
static const struct proto_ops inet6_sockraw_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = sock_no_accept,		/* a do nothing	*/
	.getname	   = inet6_getname,
	.poll		   = datagram_poll,		/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change  */
	.listen		   = sock_no_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = sock_common_recvmsg,	/* ok		*/
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw rawv6_protosw = {
	.type		= SOCK_RAW,
	.protocol	= IPPROTO_IP,	/* wild card */
	.prot		= &rawv6_prot,
	.ops		= &inet6_sockraw_ops,
	.no_check	= UDP_CSUM_DEFAULT,
	.flags		= INET_PROTOSW_REUSE,
};

int __init rawv6_init(void)
{
	int ret;

	ret = inet6_register_protosw(&rawv6_protosw);
	return ret;
}

void rawv6_exit(void)
{
	inet6_unregister_protosw(&rawv6_protosw);
}