3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on linux/ipv4/udp.c
10 * $Id: udp.c,v 1.48 2000/01/09 02:19:53 davem Exp $
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
18 #include <linux/config.h>
19 #include <linux/errno.h>
20 #include <linux/types.h>
21 #include <linux/socket.h>
22 #include <linux/sockios.h>
23 #include <linux/sched.h>
24 #include <linux/net.h>
25 #include <linux/in6.h>
26 #include <linux/netdevice.h>
27 #include <linux/if_arp.h>
28 #include <linux/ipv6.h>
29 #include <linux/icmpv6.h>
30 #include <linux/init.h>
31 #include <asm/uaccess.h>
37 #include <net/ndisc.h>
38 #include <net/protocol.h>
39 #include <net/transp_v6.h>
40 #include <net/ip6_route.h>
41 #include <net/addrconf.h>
44 #include <net/inet_common.h>
46 #include <net/checksum.h>
/*
 * Per-CPU UDP-over-IPv6 SNMP (MIB) counters.
 * NOTE(review): this file is extraction-garbled -- statements are split
 * across physical lines and original source line numbers (e.g. "48") are
 * fused into the text.  Presumably the original reads
 * `struct udp_mib udp_stats_in6[NR_CPUS*2];` (two halves per CPU,
 * softirq/user context) -- confirm against a clean net/ipv6/udp.c.
 */
48 struct udp_mib udp_stats_in6
[NR_CPUS
*2];
50 /* Grrr, addr_type already calculated by caller, but I don't want
51 * to add some silly "cookie" argument to this method just for that.
/*
 * udp_v6_get_port -- bind @sk to local port @snum, or, when @snum is 0,
 * pick a free ephemeral port starting from udp_port_rover.
 * Runs with udp_hash_lock held for writing (taken below, released on
 * both visible exit paths).
 * NOTE(review): extraction-garbled; several original lines (braces,
 * `do {`, the `int size;` declaration, goto labels, loop exits) are
 * missing from this view, so the comments describe only what the
 * visible fragments establish.
 */
53 static int udp_v6_get_port(struct sock
*sk
, unsigned short snum
)
55 write_lock_bh(&udp_hash_lock
);
57 int best_size_so_far
, best
, result
, i
;
/* Clamp the port rover back into the sysctl-configured local range. */
59 if (udp_port_rover
> sysctl_local_port_range
[1] ||
60 udp_port_rover
< sysctl_local_port_range
[0])
61 udp_port_rover
= sysctl_local_port_range
[0];
62 best_size_so_far
= 32767;
63 best
= result
= udp_port_rover
;
/*
 * Scan UDP_HTABLE_SIZE hash chains looking for the least-loaded chain
 * (shortest chain length -> best candidate port), wrapping the port
 * back into the local range when it runs past the upper bound.
 */
64 for (i
= 0; i
< UDP_HTABLE_SIZE
; i
++, result
++) {
68 sk
= udp_hash
[result
& (UDP_HTABLE_SIZE
- 1)];
70 if (result
> sysctl_local_port_range
[1])
71 result
= sysctl_local_port_range
[0] +
72 ((result
- sysctl_local_port_range
[0]) &
73 (UDP_HTABLE_SIZE
- 1));
78 if (++size
>= best_size_so_far
)
80 } while ((sk
= sk
->next
) != NULL
);
81 best_size_so_far
= size
;
/*
 * From the best candidate, linearly probe (stepping by the table size
 * so we stay on the chosen chain) until an unused local port is found.
 */
86 for(;; result
+= UDP_HTABLE_SIZE
) {
87 if (result
> sysctl_local_port_range
[1])
88 result
= sysctl_local_port_range
[0]
89 + ((result
- sysctl_local_port_range
[0]) &
90 (UDP_HTABLE_SIZE
- 1));
91 if (!udp_lport_inuse(result
))
95 udp_port_rover
= snum
= result
;
/*
 * Explicit-port path: walk the chain for @snum and refuse the bind on
 * an address/device/reuse conflict with an existing socket.
 */
98 int addr_type
= ipv6_addr_type(&sk
->net_pinfo
.af_inet6
.rcv_saddr
);
100 for (sk2
= udp_hash
[snum
& (UDP_HTABLE_SIZE
- 1)];
103 if (sk2
->num
== snum
&&
105 sk2
->bound_dev_if
== sk
->bound_dev_if
&&
107 addr_type
== IPV6_ADDR_ANY
||
108 !ipv6_addr_cmp(&sk
->net_pinfo
.af_inet6
.rcv_saddr
,
109 &sk2
->net_pinfo
.af_inet6
.rcv_saddr
)) &&
110 (!sk2
->reuse
|| !sk
->reuse
))
/* Success and failure paths both drop the hash lock. */
116 write_unlock_bh(&udp_hash_lock
);
120 write_unlock_bh(&udp_hash_lock
);
/*
 * udp_v6_hash -- insert @sk at the head of its udp_hash chain
 * (chain chosen by local port sk->num), maintaining the pprev
 * back-pointer of the displaced head, and bump the protocol's
 * use count.  Serialized by udp_hash_lock (write).
 */
124 static void udp_v6_hash(struct sock
*sk
)
126 struct sock
**skp
= &udp_hash
[sk
->num
& (UDP_HTABLE_SIZE
- 1)];
128 write_lock_bh(&udp_hash_lock
);
129 if ((sk
->next
= *skp
) != NULL
)
130 (*skp
)->pprev
= &sk
->next
;
133 sock_prot_inc_use(sk
->prot
);
135 write_unlock_bh(&udp_hash_lock
);
/*
 * udp_v6_unhash -- unlink @sk from its udp_hash chain (doubly-linked
 * via next/pprev) and drop the protocol use count, under the write
 * side of udp_hash_lock.
 * NOTE(review): the guard checking sk->pprev before the unlink is not
 * visible in this garbled view -- presumably present in the original.
 */
140 write_lock_bh(&udp_hash_lock
);
143 sk
->next
->pprev
= sk
->pprev
;
144 *sk
->pprev
= sk
->next
;
146 sock_prot_dec_use(sk
->prot
);
149 write_unlock_bh(&udp_hash_lock
);
/*
 * udp_v6_lookup -- find the best-matching UDP socket for an incoming
 * packet addressed (@saddr,@sport) -> (@daddr,@dport) on interface
 * @dif.  Walks the hash chain for the local port under the read side
 * of udp_hash_lock, scoring candidates by how many of remote port,
 * local address, remote address and bound device match (the
 * score/badness comparison is visible below; the scoring increments
 * themselves are lost in this garbled view).
 */
152 static struct sock
*udp_v6_lookup(struct in6_addr
*saddr
, u16 sport
,
153 struct in6_addr
*daddr
, u16 dport
, int dif
)
155 struct sock
*sk
, *result
= NULL
;
156 unsigned short hnum
= ntohs(dport
);
159 read_lock(&udp_hash_lock
);
160 for(sk
= udp_hash
[hnum
& (UDP_HTABLE_SIZE
- 1)]; sk
!= NULL
; sk
= sk
->next
) {
/* Candidate must own the local port and be an IPv6 socket. */
161 if((sk
->num
== hnum
) &&
162 (sk
->family
== PF_INET6
)) {
163 struct ipv6_pinfo
*np
= &sk
->net_pinfo
.af_inet6
;
/* Connected sockets must match the remote port exactly. */
166 if(sk
->dport
!= sport
)
170 if(!ipv6_addr_any(&np
->rcv_saddr
)) {
171 if(ipv6_addr_cmp(&np
->rcv_saddr
, daddr
))
175 if(!ipv6_addr_any(&np
->daddr
)) {
176 if(ipv6_addr_cmp(&np
->daddr
, saddr
))
180 if(sk
->bound_dev_if
) {
181 if(sk
->bound_dev_if
!= dif
)
/* Keep the highest-scoring candidate seen so far. */
188 } else if(score
> badness
) {
196 read_unlock(&udp_hash_lock
);
/*
 * udpv6_connect -- "connect" a UDP/IPv6 socket: record the remote
 * address/port, resolve a route, pick a source address and move the
 * socket to TCP_ESTABLISHED so sendmsg without an address works.
 * AF_INET and v4-mapped destinations are delegated to udp_connect().
 * Returns 0 or a negative errno.
 * NOTE(review): extraction-garbled; error-return statements, some
 * declarations (err, addr_type, fl) and most braces are missing from
 * this view.
 */
204 int udpv6_connect(struct sock
*sk
, struct sockaddr
*uaddr
, int addr_len
)
206 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
207 struct ipv6_pinfo
*np
= &sk
->net_pinfo
.af_inet6
;
208 struct in6_addr
*daddr
;
209 struct in6_addr saddr
;
210 struct dst_entry
*dst
;
212 struct ip6_flowlabel
*flowlabel
= NULL
;
/* Plain AF_INET destination: hand off to the IPv4 connect. */
216 if (usin
->sin6_family
== AF_INET
) {
217 err
= udp_connect(sk
, uaddr
, addr_len
);
221 if (addr_len
< sizeof(*usin
))
224 if (usin
->sin6_family
!= AF_INET6
)
225 return -EAFNOSUPPORT
;
227 fl
.fl6_flowlabel
= 0;
/* A non-zero flow label must name a label owned by this socket. */
229 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
230 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
231 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
232 if (flowlabel
== NULL
)
234 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
238 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
/* Connecting to :: means connecting to loopback (::1). */
240 if (addr_type
== IPV6_ADDR_ANY
) {
244 usin
->sin6_addr
.s6_addr
[15] = 0x01;
247 daddr
= &usin
->sin6_addr
;
/*
 * v4-mapped destination (::ffff:a.b.c.d): connect via the IPv4 path,
 * then mirror the mapped addresses into the IPv6 socket state.
 */
249 if (addr_type
== IPV6_ADDR_MAPPED
) {
250 struct sockaddr_in sin
;
252 sin
.sin_family
= AF_INET
;
253 sin
.sin_addr
.s_addr
= daddr
->s6_addr32
[3];
254 sin
.sin_port
= usin
->sin6_port
;
256 err
= udp_connect(sk
, (struct sockaddr
*) &sin
, sizeof(sin
));
262 ipv6_addr_set(&np
->daddr
, 0, 0,
263 __constant_htonl(0x0000ffff),
266 if(ipv6_addr_any(&np
->saddr
)) {
267 ipv6_addr_set(&np
->saddr
, 0, 0,
268 __constant_htonl(0x0000ffff),
273 if(ipv6_addr_any(&np
->rcv_saddr
)) {
274 ipv6_addr_set(&np
->rcv_saddr
, 0, 0,
275 __constant_htonl(0x0000ffff),
281 ipv6_addr_copy(&np
->daddr
, daddr
);
282 np
->flow_label
= fl
.fl6_flowlabel
;
284 sk
->dport
= usin
->sin6_port
;
287 * Check for a route to the destination and obtain the
288 * destination cache entry for it.
291 fl
.proto
= IPPROTO_UDP
;
292 fl
.fl6_dst
= &np
->daddr
;
294 fl
.oif
= sk
->bound_dev_if
;
295 fl
.uli_u
.ports
.dport
= sk
->dport
;
296 fl
.uli_u
.ports
.sport
= sk
->sport
;
/* A source route (from flow label or sticky options) overrides the
 * routing destination with the first hop. */
299 if (flowlabel
->opt
&& flowlabel
->opt
->srcrt
) {
300 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) flowlabel
->opt
->srcrt
;
301 fl
.fl6_dst
= rt0
->addr
;
303 } else if (np
->opt
&& np
->opt
->srcrt
) {
304 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) np
->opt
->srcrt
;
305 fl
.fl6_dst
= rt0
->addr
;
308 dst
= ip6_route_output(sk
, &fl
);
310 if ((err
= dst
->error
) != 0) {
312 fl6_sock_release(flowlabel
);
316 ip6_dst_store(sk
, dst
, fl
.fl6_dst
);
318 /* Get the source address used on the appropriate device. */
320 err
= ipv6_get_saddr(dst
, daddr
, &saddr
);
323 if(ipv6_addr_any(&np
->saddr
))
324 ipv6_addr_copy(&np
->saddr
, &saddr
);
326 if(ipv6_addr_any(&np
->rcv_saddr
)) {
327 ipv6_addr_copy(&np
->rcv_saddr
, &saddr
);
328 sk
->rcv_saddr
= 0xffffffff;
330 sk
->state
= TCP_ESTABLISHED
;
332 fl6_sock_release(flowlabel
);
/*
 * udpv6_close -- release the socket; UDP has no shutdown handshake,
 * so @timeout is unused and this just drops the inet socket.
 */
337 static void udpv6_close(struct sock
*sk
, long timeout
)
339 inet_sock_release(sk
);
343 * This should be easy, if there is something there we
344 * return it, otherwise we block.
/*
 * udpv6_recvmsg -- dequeue one datagram for @sk and copy its payload
 * to @msg, verifying the checksum lazily when CONFIG_UDP_DELAY_CSUM
 * is set.  Fills in the peer's sockaddr_in6 (v4-mapped for packets
 * that arrived over IPv4) and delivers ancillary data when requested.
 * NOTE(review): extraction-garbled; declarations (skb, err, copied),
 * truncation handling and goto labels are missing from this view.
 */
347 int udpv6_recvmsg(struct sock
*sk
, struct msghdr
*msg
, int len
,
348 int noblock
, int flags
, int *addr_len
)
354 *addr_len
=sizeof(struct sockaddr_in6
);
/* MSG_ERRQUEUE: drain the socket error queue instead of data. */
356 if (flags
& MSG_ERRQUEUE
)
357 return ipv6_recv_error(sk
, msg
, len
);
359 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
363 copied
= skb
->len
- sizeof(struct udphdr
);
366 msg
->msg_flags
|= MSG_TRUNC
;
369 #ifndef CONFIG_UDP_DELAY_CSUM
370 err
= skb_copy_datagram_iovec(skb
, sizeof(struct udphdr
),
371 msg
->msg_iov
, copied
);
/* Delayed-checksum path: hardware already verified, copy directly. */
373 if (skb
->ip_summed
==CHECKSUM_UNNECESSARY
) {
374 err
= skb_copy_datagram_iovec(skb
, sizeof(struct udphdr
), msg
->msg_iov
,
/* Multi-segment or truncated copy: verify the full checksum first. */
376 } else if (copied
> msg
->msg_iov
[0].iov_len
|| (msg
->msg_flags
&MSG_TRUNC
)) {
377 if ((unsigned short)csum_fold(csum_partial(skb
->h
.raw
, skb
->len
, skb
->csum
))) {
/* Bad checksum while peeking: unlink the skb so it is not seen again. */
379 if (flags
&MSG_PEEK
) {
381 spin_lock_irq(&sk
->receive_queue
.lock
);
382 if (skb
== skb_peek(&sk
->receive_queue
)) {
383 __skb_unlink(skb
, &sk
->receive_queue
);
386 spin_unlock_irq(&sk
->receive_queue
.lock
);
391 /* Error for blocking case is chosen to masquerade
392 as some normal condition.
394 err
= (flags
&MSG_DONTWAIT
) ? -EAGAIN
: -EHOSTUNREACH
;
395 udp_stats_in6
.UdpInErrors
++;
398 err
= skb_copy_datagram_iovec(skb
, sizeof(struct udphdr
), msg
->msg_iov
,
/* Single-segment fast path: checksum while copying to user space. */
401 unsigned int csum
= csum_partial(skb
->h
.raw
, sizeof(struct udphdr
), skb
->csum
);
404 csum
= csum_and_copy_to_user((char*)&skb
->h
.uh
[1], msg
->msg_iov
[0].iov_base
, copied
, csum
, &err
);
407 if ((unsigned short)csum_fold(csum
)) {
408 /* Error for blocking case is chosen to masquerade
409 as some normal condition.
411 err
= (flags
&MSG_DONTWAIT
) ? -EAGAIN
: -EHOSTUNREACH
;
412 udp_stats_in6
.UdpInErrors
++;
/* Record the packet timestamp for SIOCGSTAMP. */
420 sk
->stamp
=skb
->stamp
;
422 /* Copy the address. */
424 struct sockaddr_in6
*sin6
;
426 sin6
= (struct sockaddr_in6
*) msg
->msg_name
;
427 sin6
->sin6_family
= AF_INET6
;
428 sin6
->sin6_port
= skb
->h
.uh
->source
;
429 sin6
->sin6_flowinfo
= 0;
/* IPv4-arrived datagram: report the source as a v4-mapped address. */
431 if (skb
->protocol
== __constant_htons(ETH_P_IP
)) {
432 ipv6_addr_set(&sin6
->sin6_addr
, 0, 0,
433 __constant_htonl(0xffff), skb
->nh
.iph
->saddr
);
434 if (sk
->protinfo
.af_inet
.cmsg_flags
)
435 ip_cmsg_recv(msg
, skb
);
437 memcpy(&sin6
->sin6_addr
, &skb
->nh
.ipv6h
->saddr
,
438 sizeof(struct in6_addr
));
440 if (sk
->net_pinfo
.af_inet6
.rxopt
.all
)
441 datagram_recv_ctl(sk
, msg
, skb
);
447 skb_free_datagram(sk
, skb
);
/*
 * udpv6_err -- ICMPv6 error handler for UDP.  Locate the socket the
 * offending datagram came from, convert the ICMP type/code into an
 * errno, and either queue a detailed error (IPV6_RECVERR) or just set
 * sk->err and wake the owner.
 * NOTE(review): extraction-garbled; the declarations of uh/sk/err and
 * the early-return paths are missing from this view.
 */
452 void udpv6_err(struct sk_buff
*skb
, struct ipv6hdr
*hdr
,
453 struct inet6_skb_parm
*opt
,
454 int type
, int code
, unsigned char *buff
, __u32 info
)
456 struct net_device
*dev
= skb
->dev
;
457 struct in6_addr
*saddr
= &hdr
->saddr
;
458 struct in6_addr
*daddr
= &hdr
->daddr
;
/* The quoted packet must include a full UDP header. */
463 if (buff
+ sizeof(struct udphdr
) > skb
->tail
)
466 uh
= (struct udphdr
*) buff
;
/* Look up with src/dst reversed: the error quotes OUR sent packet. */
468 sk
= udp_v6_lookup(daddr
, uh
->dest
, saddr
, uh
->source
, dev
->ifindex
);
473 if (!icmpv6_err_convert(type
, code
, &err
) &&
474 !sk
->net_pinfo
.af_inet6
.recverr
)
/* BSD semantics: unconnected sockets ignore errors unless RECVERR. */
477 if (sk
->bsdism
&& sk
->state
!=TCP_ESTABLISHED
&&
478 !sk
->net_pinfo
.af_inet6
.recverr
)
481 if (sk
->net_pinfo
.af_inet6
.recverr
)
482 ipv6_icmp_error(sk
, skb
, err
, uh
->dest
, ntohl(info
), (u8
*)(uh
+1));
485 sk
->error_report(sk
);
/*
 * udpv6_queue_rcv_skb -- final delivery of @skb to @sk's receive
 * queue.  With socket filters plus delayed checksumming, the checksum
 * must be verified here before the filter can look at the payload.
 * Updates the UDP/IP6 MIB counters on both the drop and success paths.
 */
490 static inline int udpv6_queue_rcv_skb(struct sock
* sk
, struct sk_buff
*skb
)
492 #if defined(CONFIG_FILTER) && defined(CONFIG_UDP_DELAY_CSUM)
493 if (sk
->filter
&& skb
->ip_summed
!= CHECKSUM_UNNECESSARY
) {
494 if ((unsigned short)csum_fold(csum_partial(skb
->h
.raw
, skb
->len
, skb
->csum
))) {
495 UDP6_INC_STATS_BH(UdpInErrors
);
496 IP6_INC_STATS_BH(Ip6InDiscards
);
/* Checksum verified above; don't verify again downstream. */
500 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
503 if (sock_queue_rcv_skb(sk
,skb
)<0) {
504 UDP6_INC_STATS_BH(UdpInErrors
);
505 IP6_INC_STATS_BH(Ip6InDiscards
);
509 IP6_INC_STATS_BH(Ip6InDelivers
);
510 UDP6_INC_STATS_BH(UdpInDatagrams
);
/*
 * udp_v6_mcast_next -- from socket @sk onward, find the next socket on
 * the hash chain eligible to receive a multicast datagram addressed
 * (@rmt_addr,@rmt_port) -> (@loc_addr,@loc_port) on interface dif.
 * A candidate must match remote port/address (if connected), bound
 * device, and either have @loc_addr as its bound local address or be a
 * member of the multicast group (inet6_mc_check).
 * NOTE(review): the port-number comparison against `num` and the
 * continue/return statements are lost in this garbled view.
 */
514 static struct sock
*udp_v6_mcast_next(struct sock
*sk
,
515 u16 loc_port
, struct in6_addr
*loc_addr
,
516 u16 rmt_port
, struct in6_addr
*rmt_addr
,
520 unsigned short num
= ntohs(loc_port
);
521 for(; s
; s
= s
->next
) {
523 struct ipv6_pinfo
*np
= &s
->net_pinfo
.af_inet6
;
525 if(s
->dport
!= rmt_port
)
528 if(!ipv6_addr_any(&np
->daddr
) &&
529 ipv6_addr_cmp(&np
->daddr
, rmt_addr
))
532 if (s
->bound_dev_if
&& s
->bound_dev_if
!= dif
)
535 if(!ipv6_addr_any(&np
->rcv_saddr
)) {
536 if(ipv6_addr_cmp(&np
->rcv_saddr
, loc_addr
) == 0)
539 if(!inet6_mc_check(s
, loc_addr
))
548 * Note: called only from the BH handler context,
549 * so we don't need to lock the hashes.
/*
 * udpv6_mcast_deliver -- deliver a multicast UDP datagram to every
 * matching socket: clone @skb for each additional listener found via
 * udp_v6_mcast_next(), queueing the original on the last one.
 * Runs in BH context under the read side of udp_hash_lock.
 */
551 static void udpv6_mcast_deliver(struct udphdr
*uh
,
552 struct in6_addr
*saddr
, struct in6_addr
*daddr
,
555 struct sock
*sk
, *sk2
;
556 struct sk_buff
*buff
;
559 read_lock(&udp_hash_lock
);
560 sk
= udp_hash
[ntohs(uh
->dest
) & (UDP_HTABLE_SIZE
- 1)];
561 dif
= skb
->dev
->ifindex
;
562 sk
= udp_v6_mcast_next(sk
, uh
->dest
, daddr
, uh
->source
, saddr
, dif
);
/*
 * NOTE(review): the call below passes (dest, saddr, source, daddr)
 * while the first lookup above passes (dest, daddr, source, saddr) --
 * the local/remote addresses appear swapped here.  This looks like a
 * real bug in this vintage of the code; confirm against a later
 * upstream udp.c before relying on multicast fan-out behavior.
 */
568 while((sk2
= udp_v6_mcast_next(sk2
->next
, uh
->dest
, saddr
,
569 uh
->source
, daddr
, dif
))) {
571 buff
= skb_clone(skb
, GFP_ATOMIC
);
575 if (sock_queue_rcv_skb(sk2
, buff
) >= 0)
580 if (sock_queue_rcv_skb(sk
, skb
) < 0) {
584 read_unlock(&udp_hash_lock
);
/*
 * udpv6_rcv -- protocol input handler for UDP over IPv6 (BH context).
 * Validates length and checksum, dispatches multicast destinations to
 * udpv6_mcast_deliver(), looks up the destination socket, and either
 * queues the datagram or answers with ICMPv6 port-unreachable.
 * NOTE(review): extraction-garbled; declarations (uh, ulen, sk), goto
 * labels, several case labels and the discard path are missing from
 * this view.
 */
587 int udpv6_rcv(struct sk_buff
*skb
, unsigned long len
)
591 struct net_device
*dev
= skb
->dev
;
592 struct in6_addr
*saddr
= &skb
->nh
.ipv6h
->saddr
;
593 struct in6_addr
*daddr
= &skb
->nh
.ipv6h
->daddr
;
/* Advance skb->data to the UDP header. */
597 __skb_pull(skb
, skb
->h
.raw
- skb
->data
);
599 ulen
= ntohs(uh
->len
);
601 /* Check for jumbo payload */
602 if (ulen
== 0 && skb
->nh
.ipv6h
->payload_len
== 0)
605 if (ulen
> len
|| len
< sizeof(*uh
)) {
607 printk(KERN_DEBUG
"UDP: short packet: %d/%ld\n", ulen
, len
);
608 UDP6_INC_STATS_BH(UdpInErrors
);
/* Unlike UDP/IPv4, a zero checksum is never valid over IPv6. */
613 if (uh
->check
== 0) {
614 /* IPv6 draft-v2 section 8.1 says that we SHOULD log
615 this error. Well, it is reasonable.
618 printk(KERN_INFO
"IPv6: udp checksum is 0\n");
/* Eager checksum verification (no delayed-checksum support). */
624 #ifndef CONFIG_UDP_DELAY_CSUM
625 switch (skb
->ip_summed
) {
627 skb
->csum
= csum_partial((char*)uh
, ulen
, 0);
629 if (csum_ipv6_magic(saddr
, daddr
, ulen
, IPPROTO_UDP
, skb
->csum
)) {
630 printk(KERN_DEBUG
"IPv6: udp checksum error\n");
/* Delayed path: accept hardware-verified sums, else seed skb->csum
 * with the pseudo-header so the copy routines can finish the check. */
635 if (skb
->ip_summed
==CHECKSUM_HW
) {
636 if (csum_ipv6_magic(saddr
, daddr
, ulen
, IPPROTO_UDP
, skb
->csum
))
638 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
639 } else if (skb
->ip_summed
!= CHECKSUM_UNNECESSARY
)
640 skb
->csum
= ~csum_ipv6_magic(saddr
, daddr
, ulen
, IPPROTO_UDP
, 0);
646 * Multicast receive code
648 if (ipv6_addr_type(daddr
) & IPV6_ADDR_MULTICAST
) {
649 udpv6_mcast_deliver(uh
, saddr
, daddr
, skb
);
656 * check socket cache ... must talk to Alan about his plans
657 * for sock caches... i'll skip this for now.
660 sk
= udp_v6_lookup(saddr
, uh
->source
, daddr
, uh
->dest
, dev
->ifindex
);
/* No listener: verify the checksum before sending port-unreachable. */
663 #ifdef CONFIG_UDP_DELAY_CSUM
664 if (skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&&
665 (unsigned short)csum_fold(csum_partial((char*)uh
, len
, skb
->csum
)))
668 UDP6_INC_STATS_BH(UdpNoPorts
);
670 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, ICMPV6_PORT_UNREACH
, 0, dev
);
/* Disabled experimental per-socket callback hook. */
675 if (0/*sk->user_callback &&
676 sk->user_callback(sk->user_data, skb) == 0*/) {
677 UDP6_INC_STATS_BH(UdpInDatagrams
);
684 udpv6_queue_rcv_skb(sk
, skb
);
689 UDP6_INC_STATS_BH(UdpInErrors
);
704 struct in6_addr
*daddr
;
/*
 * udpv6_getfrag -- fragment-fill callback for ip6_build_xmit().  Copies
 * @len bytes at @offset of the fake-header context @data (a struct
 * udpv6fakehdr wrapping the user iovec) into @buff, accumulating the
 * checksum; for the first fragment (offset covers the UDP header) it
 * also finalizes uh.check with the IPv6 pseudo-header and writes the
 * UDP header itself.
 * NOTE(review): extraction-garbled; the dst/clen setup, the final
 * return values and the 0x0000->0xffff checksum substitution are
 * missing from this view.
 */
711 static int udpv6_getfrag(const void *data
, struct in6_addr
*addr
,
712 char *buff
, unsigned int offset
, unsigned int len
)
714 struct udpv6fakehdr
*udh
= (struct udpv6fakehdr
*) data
;
/* Skip past the UDP header region within this fragment. */
722 offset
-= sizeof(struct udphdr
);
724 dst
+= sizeof(struct udphdr
);
726 clen
-= sizeof(struct udphdr
);
729 if (csum_partial_copy_fromiovecend(dst
, udh
->iov
, offset
,
734 struct in6_addr
*daddr
;
/* Fold the UDP header itself into the running checksum. */
736 udh
->wcheck
= csum_partial((char *)udh
, sizeof(struct udphdr
),
743 * use packet destination address
744 * this should improve cache locality
748 udh
->uh
.check
= csum_ipv6_magic(addr
, daddr
,
749 udh
->pl_len
, IPPROTO_UDP
,
/* A computed checksum of zero is transmitted as all-ones. */
751 if (udh
->uh
.check
== 0)
754 memcpy(buff
, udh
, sizeof(struct udphdr
));
/*
 * udpv6_sendmsg -- transmit @ulen payload bytes from @msg on @sk.
 * Resolves the destination (explicit sockaddr_in6 or the connected
 * peer), handles flow labels and ancillary-data options, delegates
 * v4-mapped destinations to udp_sendmsg(), and hands the assembled
 * flow to ip6_build_xmit() with udpv6_getfrag as the copy callback.
 * Returns bytes sent or a negative errno.
 * NOTE(review): extraction-garbled; declarations (fl, err, hlimit,
 * addr_type), several error returns and braces are missing from this
 * view.
 */
759 static int udpv6_sendmsg(struct sock
*sk
, struct msghdr
*msg
, int ulen
)
761 struct ipv6_txoptions opt_space
;
762 struct udpv6fakehdr udh
;
763 struct ipv6_pinfo
*np
= &sk
->net_pinfo
.af_inet6
;
764 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*) msg
->msg_name
;
765 struct ipv6_txoptions
*opt
= NULL
;
766 struct ip6_flowlabel
*flowlabel
= NULL
;
768 int addr_len
= msg
->msg_namelen
;
769 struct in6_addr
*daddr
;
770 int len
= ulen
+ sizeof(struct udphdr
);
776 /* Rough check on arithmetic overflow,
777 better check is made in ip6_build_xmit
779 if (ulen
< 0 || ulen
> INT_MAX
- sizeof(struct udphdr
))
782 fl
.fl6_flowlabel
= 0;
/* Explicit AF_INET destination goes straight to the IPv4 path. */
785 if (sin6
->sin6_family
== AF_INET
)
786 return udp_sendmsg(sk
, msg
, ulen
);
788 if (addr_len
< sizeof(*sin6
))
791 if (sin6
->sin6_family
&& sin6
->sin6_family
!= AF_INET6
)
794 if (sin6
->sin6_port
== 0)
797 udh
.uh
.dest
= sin6
->sin6_port
;
798 daddr
= &sin6
->sin6_addr
;
/* A flow label in sin6_flowinfo must be owned by this socket. */
801 fl
.fl6_flowlabel
= sin6
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
802 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
803 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
804 if (flowlabel
== NULL
)
806 daddr
= &flowlabel
->dst
;
810 /* Otherwise it will be difficult to maintain sk->dst_cache. */
811 if (sk
->state
== TCP_ESTABLISHED
&&
812 !ipv6_addr_cmp(daddr
, &sk
->net_pinfo
.af_inet6
.daddr
))
813 daddr
= &sk
->net_pinfo
.af_inet6
.daddr
;
/* No address given: require a connected socket and use its peer. */
815 if (sk
->state
!= TCP_ESTABLISHED
)
818 udh
.uh
.dest
= sk
->dport
;
819 daddr
= &sk
->net_pinfo
.af_inet6
.daddr
;
820 fl
.fl6_flowlabel
= np
->flow_label
;
823 addr_type
= ipv6_addr_type(daddr
);
/* v4-mapped destination: rewrite msg_name and use udp_sendmsg(). */
825 if (addr_type
== IPV6_ADDR_MAPPED
) {
826 struct sockaddr_in sin
;
828 sin
.sin_family
= AF_INET
;
829 sin
.sin_addr
.s_addr
= daddr
->s6_addr32
[3];
830 sin
.sin_port
= udh
.uh
.dest
;
831 msg
->msg_name
= (struct sockaddr
*)(&sin
);
832 msg
->msg_namelen
= sizeof(sin
);
833 fl6_sock_release(flowlabel
);
835 return udp_sendmsg(sk
, msg
, ulen
);
839 fl
.oif
= sk
->bound_dev_if
;
/* Parse ancillary data (IPV6_PKTINFO, hop limit, options, ...). */
842 if (msg
->msg_controllen
) {
844 memset(opt
, 0, sizeof(struct ipv6_txoptions
));
846 err
= datagram_send_ctl(msg
, &fl
, opt
, &hlimit
);
848 fl6_sock_release(flowlabel
);
851 if ((fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) && !flowlabel
) {
852 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
853 if (flowlabel
== NULL
)
856 if (!(opt
->opt_nflen
|opt
->opt_flen
))
/* Merge per-label options over the per-packet/sticky options. */
862 opt
= fl6_merge_options(&opt_space
, flowlabel
, opt
);
863 if (opt
&& opt
->srcrt
)
866 udh
.uh
.source
= sk
->sport
;
/* Jumbograms (len >= 64K) carry a zero UDP length field. */
867 udh
.uh
.len
= len
< 0x10000 ? htons(len
) : 0;
869 udh
.iov
= msg
->msg_iov
;
873 fl
.proto
= IPPROTO_UDP
;
875 fl
.uli_u
.ports
.dport
= udh
.uh
.dest
;
876 fl
.uli_u
.ports
.sport
= udh
.uh
.source
;
878 err
= ip6_build_xmit(sk
, udpv6_getfrag
, &udh
, &fl
, len
, opt
, hlimit
,
881 fl6_sock_release(flowlabel
);
886 UDP6_INC_STATS_USER(UdpOutDatagrams
);
/*
 * IPv6 protocol descriptor: routes IPPROTO_UDP packets to udpv6_rcv()
 * and ICMPv6 errors to udpv6_err().  Registered by udpv6_init().
 */
890 static struct inet6_protocol udpv6_protocol
=
892 udpv6_rcv
, /* UDP handler */
893 udpv6_err
, /* UDP error control */
895 IPPROTO_UDP
, /* protocol ID */
/* /proc/net output: each entry printed left-justified in 190 columns. */
902 #define LINE_FMT "%-190s\n"
/*
 * get_udp6_sock -- format one socket @sp as a /proc/net/udp6 line into
 * @tmpbuf (@i is the line/bucket index).  Prints local/remote address
 * and port, queue sizes, timer state, uid and inode.
 */
904 static void get_udp6_sock(struct sock
*sp
, char *tmpbuf
, int i
)
906 struct in6_addr
*dest
, *src
;
909 unsigned long timer_expires
;
911 dest
= &sp
->net_pinfo
.af_inet6
.daddr
;
912 src
= &sp
->net_pinfo
.af_inet6
.rcv_saddr
;
913 destp
= ntohs(sp
->dport
);
914 srcp
= ntohs(sp
->sport
);
/* 2 = "pending timer" in the /proc timer-state encoding. */
915 timer_active
= (sp
->timer
.prev
!= NULL
) ? 2 : 0;
916 timer_expires
= (timer_active
== 2 ? sp
->timer
.expires
: jiffies
);
918 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
919 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
921 src
->s6_addr32
[0], src
->s6_addr32
[1],
922 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
923 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
924 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
926 atomic_read(&sp
->wmem_alloc
), atomic_read(&sp
->rmem_alloc
),
927 timer_active
, timer_expires
-jiffies
, 0,
/*
 * NOTE(review): the uid line dereferences sp->socket->inode
 * unconditionally while the very next line guards with `sp->socket ?`
 * -- a NULL sp->socket would oops here.  Worth confirming against a
 * later upstream fix.
 */
928 sp
->socket
->inode
->i_uid
, 0,
929 sp
->socket
? sp
->socket
->inode
->i_ino
: 0,
930 atomic_read(&sp
->refcnt
), sp
);
/*
 * udp6_get_info -- /proc/net/udp6 read handler.  Emits a header line,
 * then one formatted line per IPv6 UDP socket found in udp_hash
 * (skipping IPv4 sockets sharing the table), honoring the @offset/
 * @length window of the procfs read protocol.
 * NOTE(review): extraction-garbled; the pos bookkeeping, length
 * clamping and the final return are missing from this view.
 */
933 int udp6_get_info(char *buffer
, char **start
, off_t offset
, int length
)
935 int len
= 0, num
= 0, i
;
938 char tmpbuf
[LINE_LEN
+2];
/* Header line, only when the read window starts before it. */
940 if (offset
< LINE_LEN
+1)
941 len
+= sprintf(buffer
, LINE_FMT
,
943 "local_address " /* 38 */
944 "remote_address " /* 38 */
945 "st tx_queue rx_queue tr tm->when retrnsmt" /* 41 */
946 " uid timeout inode"); /* 21 */
950 read_lock(&udp_hash_lock
);
951 for (i
= 0; i
< UDP_HTABLE_SIZE
; i
++) {
954 for (sk
= udp_hash
[i
]; sk
; sk
= sk
->next
, num
++) {
955 if (sk
->family
!= PF_INET6
)
960 get_udp6_sock(sk
, tmpbuf
, i
);
961 len
+= sprintf(buffer
+len
, LINE_FMT
, tmpbuf
);
967 read_unlock(&udp_hash_lock
);
/* Translate absolute file position into this buffer's start. */
968 begin
= len
- (pos
- offset
);
969 *start
= buffer
+ begin
;
/*
 * proto operations table for SOCK_DGRAM over AF_INET6.  Positional
 * initializer (pre-designated-initializer kernel style); the trailing
 * comments name each struct proto slot.  Shares udp_disconnect and
 * udp_ioctl with the IPv4 implementation.
 */
978 struct proto udpv6_prot
= {
979 udpv6_close
, /* close */
980 udpv6_connect
, /* connect */
981 udp_disconnect
, /* disconnect */
983 NULL
, /* retransmit */
984 NULL
, /* write_wakeup */
985 NULL
, /* read_wakeup */
986 datagram_poll
, /* poll */
987 udp_ioctl
, /* ioctl */
989 inet6_destroy_sock
, /* destroy */
991 ipv6_setsockopt
, /* setsockopt */
992 ipv6_getsockopt
, /* getsockopt */
993 udpv6_sendmsg
, /* sendmsg */
994 udpv6_recvmsg
, /* recvmsg */
996 udpv6_queue_rcv_skb
, /* backlog_rcv */
997 udp_v6_hash
, /* hash */
998 udp_v6_unhash
, /* unhash */
999 udp_v6_get_port
, /* get_port */
1000 128, /* max_header */
1001 0, /* retransmits */
/*
 * udpv6_init -- boot-time registration of the UDP handler with the
 * IPv6 protocol switch.
 */
1005 void __init
udpv6_init(void)
1007 inet6_add_protocol(&udpv6_protocol
);