Merge with Linux 2.3.40.
[linux-2.6/linux-mips.git] / net / ipv6 / udp.c
blob 3ecc55030a694261b4fd23dcf026d88f832d88a5
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	$Id: udp.c,v 1.48 2000/01/09 02:19:53 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <asm/uaccess.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/inet_common.h>

#include <net/checksum.h>
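
/*
 * Per-CPU SNMP counters for UDPv6.  The UDP6_INC_STATS_* macros used
 * below index this array with two slots per CPU (softirq and process
 * context), which is presumably why it is sized NR_CPUS*2.
 */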
struct udp_mib udp_stats_in6[NR_CPUS*2];
/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 */
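/*
 * Port selection: with snum == 0 an ephemeral port is picked by scanning
 * UDP_HTABLE_SIZE consecutive hash chains starting at udp_port_rover,
 * remembering the shortest chain, and then probing ports that map to that
 * chain until udp_lport_inuse() reports a free one.  With an explicit snum,
 * the port's chain is searched for a conflicting bind (same port and
 * device, overlapping local address, and no SO_REUSEADDR on both sides).
 */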
static int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct sock *sk;
			int size;

			sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (!sk) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			do {
				if (++size >= best_size_so_far)
					goto next;
			} while ((sk = sk->next) != NULL);
			best_size_so_far = size;
			best = result;
		next:;
		}
		result = best;
		for(;; result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
				break;
		}
gotit:
		udp_port_rover = snum = result;
	} else {
		struct sock *sk2;
		int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);

		for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
		     sk2 != NULL;
		     sk2 = sk2->next) {
			if (sk2->num == snum &&
			    sk2 != sk &&
			    sk2->bound_dev_if == sk->bound_dev_if &&
			    (!sk2->rcv_saddr ||
			     addr_type == IPV6_ADDR_ANY ||
			     !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
					    &sk2->net_pinfo.af_inet6.rcv_saddr)) &&
			    (!sk2->reuse || !sk->reuse))
				goto fail;
		}
	}
	sk->num = snum;
	write_unlock_bh(&udp_hash_lock);
	return 0;

fail:
	write_unlock_bh(&udp_hash_lock);
	return 1;
}
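
/*
 * Link the socket into the head of its hash chain (indexed by the low
 * bits of the local port) under udp_hash_lock and take a reference.
 */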
static void udp_v6_hash(struct sock *sk)
{
	struct sock **skp = &udp_hash[sk->num & (UDP_HTABLE_SIZE - 1)];

	write_lock_bh(&udp_hash_lock);
	if ((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;
	*skp = sk;
	sk->pprev = skp;
	sock_prot_inc_use(sk->prot);
	sock_hold(sk);
	write_unlock_bh(&udp_hash_lock);
}
static void udp_v6_unhash(struct sock *sk)
{
	write_lock_bh(&udp_hash_lock);
	if (sk->pprev) {
		if (sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
		__sock_put(sk);
	}
	write_unlock_bh(&udp_hash_lock);
}
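
/*
 * Best-match lookup for unicast delivery: every socket on the destination
 * port's chain is scored on how many of {connected remote port, bound local
 * address, connected remote address, bound device} match exactly; a score
 * of 4 is a perfect match and terminates the scan early.
 */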
static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
				  struct in6_addr *daddr, u16 dport, int dif)
{
	struct sock *sk, *result = NULL;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
		if((sk->num == hnum) &&
		   (sk->family == PF_INET6)) {
			struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
			int score = 0;
			if(sk->dport) {
				if(sk->dport != sport)
					continue;
				score++;
			}
			if(!ipv6_addr_any(&np->rcv_saddr)) {
				if(ipv6_addr_cmp(&np->rcv_saddr, daddr))
					continue;
				score++;
			}
			if(!ipv6_addr_any(&np->daddr)) {
				if(ipv6_addr_cmp(&np->daddr, saddr))
					continue;
				score++;
			}
			if(sk->bound_dev_if) {
				if(sk->bound_dev_if != dif)
					continue;
				score++;
			}
			if(score == 4) {
				result = sk;
				break;
			} else if(score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}
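
/*
 * Connect a UDPv6 socket.  V4-mapped destinations are handed off to the
 * IPv4 udp_connect() and the in6 addresses are rebuilt as ::ffff:a.b.c.d
 * mappings; for real IPv6 destinations the route and source address are
 * resolved once here and stored via ip6_dst_store(), so that sendmsg() on
 * a connected socket can reuse sk->dst_cache.
 */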
int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
	struct ipv6_pinfo	*np = &sk->net_pinfo.af_inet6;
	struct in6_addr		*daddr;
	struct in6_addr		saddr;
	struct dst_entry	*dst;
	struct flowi		fl;
	struct ip6_flowlabel	*flowlabel = NULL;
	int			addr_type;
	int			err;

	if (usin->sin6_family == AF_INET) {
		err = udp_connect(sk, uaddr, addr_len);
		goto ipv4_connected;
	}

	if (addr_len < sizeof(*usin))
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	fl.fl6_flowlabel = 0;
	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
		}
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type == IPV6_ADDR_ANY) {
		/*
		 *	connect to self
		 */
		usin->sin6_addr.s6_addr[15] = 0x01;
	}

	daddr = &usin->sin6_addr;

	if (addr_type == IPV6_ADDR_MAPPED) {
		struct sockaddr_in sin;

		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = daddr->s6_addr32[3];
		sin.sin_port = usin->sin6_port;

		err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin));

ipv4_connected:
		if (err < 0)
			return err;

		ipv6_addr_set(&np->daddr, 0, 0,
			      __constant_htonl(0x0000ffff),
			      sk->daddr);

		if(ipv6_addr_any(&np->saddr)) {
			ipv6_addr_set(&np->saddr, 0, 0,
				      __constant_htonl(0x0000ffff),
				      sk->saddr);
		}

		if(ipv6_addr_any(&np->rcv_saddr)) {
			ipv6_addr_set(&np->rcv_saddr, 0, 0,
				      __constant_htonl(0x0000ffff),
				      sk->rcv_saddr);
		}
		return 0;
	}

	ipv6_addr_copy(&np->daddr, daddr);
	np->flow_label = fl.fl6_flowlabel;

	sk->dport = usin->sin6_port;

	/*
	 *	Check for a route to the destination and obtain the
	 *	destination cache for it.
	 */

	fl.proto = IPPROTO_UDP;
	fl.fl6_dst = &np->daddr;
	fl.fl6_src = &saddr;
	fl.oif = sk->bound_dev_if;
	fl.uli_u.ports.dport = sk->dport;
	fl.uli_u.ports.sport = sk->sport;

	if (flowlabel) {
		if (flowlabel->opt && flowlabel->opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
			fl.fl6_dst = rt0->addr;
		}
	} else if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
		fl.fl6_dst = rt0->addr;
	}

	dst = ip6_route_output(sk, &fl);

	if ((err = dst->error) != 0) {
		dst_release(dst);
		fl6_sock_release(flowlabel);
		return err;
	}

	ip6_dst_store(sk, dst, fl.fl6_dst);

	/* get the source address used in the appropriate device */

	err = ipv6_get_saddr(dst, daddr, &saddr);

	if (err == 0) {
		if(ipv6_addr_any(&np->saddr))
			ipv6_addr_copy(&np->saddr, &saddr);

		if(ipv6_addr_any(&np->rcv_saddr)) {
			ipv6_addr_copy(&np->rcv_saddr, &saddr);
			sk->rcv_saddr = 0xffffffff;
		}
		sk->state = TCP_ESTABLISHED;
	}
	fl6_sock_release(flowlabel);

	return err;
}
static void udpv6_close(struct sock *sk, long timeout)
{
	inet_sock_release(sk);
}
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */
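/*
 * With CONFIG_UDP_DELAY_CSUM the receive checksum was deferred in
 * udpv6_rcv(), so it is verified here: either over the whole skb before a
 * plain copy, or folded into csum_and_copy_to_user() when the datagram
 * fits in the first iovec entry.
 */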
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		  int noblock, int flags, int *addr_len)
{
	struct sk_buff *skb;
	int copied, err;

	if (addr_len)
		*addr_len=sizeof(struct sockaddr_in6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

#ifndef CONFIG_UDP_DELAY_CSUM
	err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
				      msg->msg_iov, copied);
#else
	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (copied > msg->msg_iov[0].iov_len || (msg->msg_flags&MSG_TRUNC)) {
		if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
			/* Clear queue. */
			if (flags&MSG_PEEK) {
				int clear = 0;
				spin_lock_irq(&sk->receive_queue.lock);
				if (skb == skb_peek(&sk->receive_queue)) {
					__skb_unlink(skb, &sk->receive_queue);
					clear = 1;
				}
				spin_unlock_irq(&sk->receive_queue.lock);
				if (clear)
					kfree_skb(skb);
			}

			/* Error for blocking case is chosen to masquerade
			   as some normal condition.
			 */
			err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
			UDP6_INC_STATS_USER(UdpInErrors);
			goto out_free;
		}
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		unsigned int csum = csum_partial(skb->h.raw, sizeof(struct udphdr), skb->csum);

		err = 0;
		csum = csum_and_copy_to_user((char*)&skb->h.uh[1], msg->msg_iov[0].iov_base, copied, csum, &err);
		if (err)
			goto out_free;
		if ((unsigned short)csum_fold(csum)) {
			/* Error for blocking case is chosen to masquerade
			   as some normal condition.
			 */
			err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
			UDP6_INC_STATS_USER(UdpInErrors);
			goto out_free;
		}
	}
#endif
	if (err)
		goto out_free;

	sk->stamp=skb->stamp;

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;

		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      __constant_htonl(0xffff), skb->nh.iph->saddr);
			if (sk->protinfo.af_inet.cmsg_flags)
				ip_cmsg_recv(msg, skb);
		} else {
			memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
			       sizeof(struct in6_addr));

			if (sk->net_pinfo.af_inet6.rxopt.all)
				datagram_recv_ctl(sk, msg, skb);
		}
	}
	err = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
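
/*
 * ICMPv6 error handler: find the socket owning the offending UDP header,
 * then either queue a detailed error via ipv6_icmp_error() (IPV6_RECVERR
 * set) or just report the converted errno through sk->err.
 */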
void udpv6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
	       struct inet6_skb_parm *opt,
	       int type, int code, unsigned char *buff, __u32 info)
{
	struct net_device *dev = skb->dev;
	struct in6_addr *saddr = &hdr->saddr;
	struct in6_addr *daddr = &hdr->daddr;
	struct sock *sk;
	struct udphdr *uh;
	int err;

	if (buff + sizeof(struct udphdr) > skb->tail)
		return;

	uh = (struct udphdr *) buff;

	sk = udp_v6_lookup(daddr, uh->dest, saddr, uh->source, dev->ifindex);

	if (sk == NULL)
		return;

	if (!icmpv6_err_convert(type, code, &err) &&
	    !sk->net_pinfo.af_inet6.recverr)
		goto out;

	if (sk->bsdism && sk->state!=TCP_ESTABLISHED &&
	    !sk->net_pinfo.af_inet6.recverr)
		goto out;

	if (sk->net_pinfo.af_inet6.recverr)
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));

	sk->err = err;
	sk->error_report(sk);
out:
	sock_put(sk);
}
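
/*
 * Final delivery into the socket receive queue.  With socket filters and
 * delayed checksums the datagram has to be verified before the filter
 * looks at it.
 */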
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
#if defined(CONFIG_FILTER) && defined(CONFIG_UDP_DELAY_CSUM)
	if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
			UDP6_INC_STATS_BH(UdpInErrors);
			IP6_INC_STATS_BH(Ip6InDiscards);
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
#endif
	if (sock_queue_rcv_skb(sk,skb)<0) {
		UDP6_INC_STATS_BH(UdpInErrors);
		IP6_INC_STATS_BH(Ip6InDiscards);
		kfree_skb(skb);
		return 0;
	}
	IP6_INC_STATS_BH(Ip6InDelivers);
	UDP6_INC_STATS_BH(UdpInDatagrams);
	return 0;
}
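
/*
 * Walk a hash chain starting at sk and return the next socket willing to
 * accept a multicast datagram with the given local/remote ports, addresses
 * and incoming device.
 */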
static struct sock *udp_v6_mcast_next(struct sock *sk,
				      u16 loc_port, struct in6_addr *loc_addr,
				      u16 rmt_port, struct in6_addr *rmt_addr,
				      int dif)
{
	struct sock *s = sk;
	unsigned short num = ntohs(loc_port);
	for(; s; s = s->next) {
		if(s->num == num) {
			struct ipv6_pinfo *np = &s->net_pinfo.af_inet6;
			if(s->dport) {
				if(s->dport != rmt_port)
					continue;
			}
			if(!ipv6_addr_any(&np->daddr) &&
			   ipv6_addr_cmp(&np->daddr, rmt_addr))
				continue;

			if (s->bound_dev_if && s->bound_dev_if != dif)
				continue;

			if(!ipv6_addr_any(&np->rcv_saddr)) {
				if(ipv6_addr_cmp(&np->rcv_saddr, loc_addr) == 0)
					return s;
			}
			if(!inet6_mc_check(s, loc_addr))
				continue;
			return s;
		}
	}
	return NULL;
}
/*
 * 	Note: called only from the BH handler context,
 * 	so we don't need to lock the hashes.
 */
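/*
 * The skb is cloned once for every additional matching socket; the
 * original is queued on the first match, or freed if nobody wants it.
 */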
static void udpv6_mcast_deliver(struct udphdr *uh,
				struct in6_addr *saddr, struct in6_addr *daddr,
				struct sk_buff *skb)
{
	struct sock *sk, *sk2;
	struct sk_buff *buff;
	int dif;

	read_lock(&udp_hash_lock);
	sk = udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)];
	dif = skb->dev->ifindex;
	sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (!sk)
		goto free_skb;

	buff = NULL;
	sk2 = sk;
	while((sk2 = udp_v6_mcast_next(sk2->next, uh->dest, saddr,
				       uh->source, daddr, dif))) {
		if (!buff) {
			buff = skb_clone(skb, GFP_ATOMIC);
			if (!buff)
				continue;
		}
		if (sock_queue_rcv_skb(sk2, buff) >= 0)
			buff = NULL;
	}
	if (buff)
		kfree_skb(buff);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
free_skb:
		kfree_skb(skb);
	}
	read_unlock(&udp_hash_lock);
}
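
/*
 * Main receive entry point, called from the IPv6 input path: validate the
 * length and checksum (or defer the checksum under CONFIG_UDP_DELAY_CSUM),
 * then branch to multicast delivery or unicast lookup and queueing.
 */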
int udpv6_rcv(struct sk_buff *skb, unsigned long len)
{
	struct sock *sk;
	struct udphdr *uh;
	struct net_device *dev = skb->dev;
	struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
	struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
	u32 ulen;

	uh = skb->h.uh;
	__skb_pull(skb, skb->h.raw - skb->data);

	ulen = ntohs(uh->len);

	/* Check for jumbo payload */
	if (ulen == 0 && skb->nh.ipv6h->payload_len == 0)
		ulen = len;

	if (ulen > len || len < sizeof(*uh)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "UDP: short packet: %d/%ld\n", ulen, len);
		UDP6_INC_STATS_BH(UdpInErrors);
		kfree_skb(skb);
		return(0);
	}

	if (uh->check == 0) {
		/* IPv6 draft-v2 section 8.1 says that we SHOULD log
		   this error. Well, it is reasonable.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "IPv6: udp checksum is 0\n");
		goto discard;
	}

	skb_trim(skb, ulen);

#ifndef CONFIG_UDP_DELAY_CSUM
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		skb->csum = csum_partial((char*)uh, ulen, 0);
	case CHECKSUM_HW:
		if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
			printk(KERN_DEBUG "IPv6: udp checksum error\n");
			goto discard;
		}
	};
#else
	if (skb->ip_summed==CHECKSUM_HW) {
		if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
			goto discard;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
#endif

	len = ulen;

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_type(daddr) & IPV6_ADDR_MULTICAST) {
		udpv6_mcast_deliver(uh, saddr, daddr, skb);
		return 0;
	}

	/* Unicast */

	/*
	 * check socket cache ... must talk to Alan about his plans
	 * for sock caches... i'll skip this for now.
	 */
	sk = udp_v6_lookup(saddr, uh->source, daddr, uh->dest, dev->ifindex);

	if (sk == NULL) {
#ifdef CONFIG_UDP_DELAY_CSUM
		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
		    (unsigned short)csum_fold(csum_partial((char*)uh, len, skb->csum)))
			goto discard;
#endif
		UDP6_INC_STATS_BH(UdpNoPorts);

		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);

		kfree_skb(skb);
		return(0);
	}

	if (0/*sk->user_callback &&
	     sk->user_callback(sk->user_data, skb) == 0*/) {
		UDP6_INC_STATS_BH(UdpInDatagrams);
		sock_put(sk);
		return(0);
	}

	/* deliver */

	udpv6_queue_rcv_skb(sk, skb);
	sock_put(sk);
	return(0);

discard:
	UDP6_INC_STATS_BH(UdpInErrors);
	kfree_skb(skb);
	return(0);
}
/*
 *	Sending
 */
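/*
 * Transmit path: udpv6_sendmsg() fills a struct udpv6fakehdr describing the
 * UDP header and the user iovec, and ip6_build_xmit() calls back into
 * udpv6_getfrag() to copy and checksum each fragment of the packet.
 */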
struct udpv6fakehdr
{
	struct udphdr	uh;
	struct iovec	*iov;
	__u32		wcheck;
	__u32		pl_len;
	struct in6_addr *daddr;
};

/*
 *	with checksum
 */

static int udpv6_getfrag(const void *data, struct in6_addr *addr,
			 char *buff, unsigned int offset, unsigned int len)
{
	struct udpv6fakehdr *udh = (struct udpv6fakehdr *) data;
	char *dst;
	int final = 0;
	int clen = len;

	dst = buff;

	if (offset) {
		offset -= sizeof(struct udphdr);
	} else {
		dst += sizeof(struct udphdr);
		final = 1;
		clen -= sizeof(struct udphdr);
	}

	if (csum_partial_copy_fromiovecend(dst, udh->iov, offset,
					   clen, &udh->wcheck))
		return -EFAULT;

	if (final) {
		struct in6_addr *daddr;

		udh->wcheck = csum_partial((char *)udh, sizeof(struct udphdr),
					   udh->wcheck);

		if (udh->daddr) {
			daddr = udh->daddr;
		} else {
			/*
			 *	use packet destination address
			 *	this should improve cache locality
			 */
			daddr = addr + 1;
		}
		udh->uh.check = csum_ipv6_magic(addr, daddr,
						udh->pl_len, IPPROTO_UDP,
						udh->wcheck);
		if (udh->uh.check == 0)
			udh->uh.check = -1;

		memcpy(buff, udh, sizeof(struct udphdr));
	}
	return 0;
}
static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
{
	struct ipv6_txoptions opt_space;
	struct udpv6fakehdr udh;
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	struct in6_addr *daddr;
	int len = ulen + sizeof(struct udphdr);
	int addr_type;
	int hlimit = -1;

	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_build_xmit
	 */
	if (ulen < 0 || ulen > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	fl.fl6_flowlabel = 0;

	if (sin6) {
		if (sin6->sin6_family == AF_INET)
			return udp_sendmsg(sk, msg, ulen);

		if (addr_len < sizeof(*sin6))
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EINVAL;

		if (sin6->sin6_port == 0)
			return -EINVAL;

		udh.uh.dest = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/* Otherwise it will be difficult to maintain sk->dst_cache. */
		if (sk->state == TCP_ESTABLISHED &&
		    !ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
			daddr = &sk->net_pinfo.af_inet6.daddr;
	} else {
		if (sk->state != TCP_ESTABLISHED)
			return -ENOTCONN;

		udh.uh.dest = sk->dport;
		daddr = &sk->net_pinfo.af_inet6.daddr;
		fl.fl6_flowlabel = np->flow_label;
	}

	addr_type = ipv6_addr_type(daddr);

	if (addr_type == IPV6_ADDR_MAPPED) {
		struct sockaddr_in sin;

		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = daddr->s6_addr32[3];
		sin.sin_port = udh.uh.dest;
		msg->msg_name = (struct sockaddr *)(&sin);
		msg->msg_namelen = sizeof(sin);
		fl6_sock_release(flowlabel);

		return udp_sendmsg(sk, msg, ulen);
	}

	udh.daddr = NULL;
	fl.oif = sk->bound_dev_if;
	fl.fl6_src = NULL;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));

		err = datagram_send_ctl(msg, &fl, opt, &hlimit);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	if (opt && opt->srcrt)
		udh.daddr = daddr;

	udh.uh.source = sk->sport;
	udh.uh.len = len < 0x10000 ? htons(len) : 0;
	udh.uh.check = 0;
	udh.iov = msg->msg_iov;
	udh.wcheck = 0;
	udh.pl_len = len;

	fl.proto = IPPROTO_UDP;
	fl.fl6_dst = daddr;
	fl.uli_u.ports.dport = udh.uh.dest;
	fl.uli_u.ports.sport = udh.uh.source;

	err = ip6_build_xmit(sk, udpv6_getfrag, &udh, &fl, len, opt, hlimit,
			     msg->msg_flags);

	fl6_sock_release(flowlabel);

	if (err < 0)
		return err;

	UDP6_INC_STATS_USER(UdpOutDatagrams);
	return ulen;
}
static struct inet6_protocol udpv6_protocol =
{
	udpv6_rcv,		/* UDP handler		*/
	udpv6_err,		/* UDP error control	*/
	NULL,			/* next			*/
	IPPROTO_UDP,		/* protocol ID		*/
	0,			/* copy			*/
	NULL,			/* data			*/
	"UDPv6"			/* name			*/
};
#define LINE_LEN 190
#define LINE_FMT "%-190s\n"

static void get_udp6_sock(struct sock *sp, char *tmpbuf, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;

	dest  = &sp->net_pinfo.af_inet6.daddr;
	src   = &sp->net_pinfo.af_inet6.rcv_saddr;
	destp = ntohs(sp->dport);
	srcp  = ntohs(sp->sport);
	timer_active	= (sp->timer.prev != NULL) ? 2 : 0;
	timer_expires	= (timer_active == 2 ? sp->timer.expires : jiffies);
	sprintf(tmpbuf,
		"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		"%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
		i,
		src->s6_addr32[0], src->s6_addr32[1],
		src->s6_addr32[2], src->s6_addr32[3], srcp,
		dest->s6_addr32[0], dest->s6_addr32[1],
		dest->s6_addr32[2], dest->s6_addr32[3], destp,
		sp->state,
		atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
		timer_active, timer_expires-jiffies, 0,
		sp->socket->inode->i_uid, 0,
		sp->socket ? sp->socket->inode->i_ino : 0,
		atomic_read(&sp->refcnt), sp);
}
int udp6_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0, num = 0, i;
	off_t pos = 0;
	off_t begin;
	char tmpbuf[LINE_LEN+2];

	if (offset < LINE_LEN+1)
		len += sprintf(buffer, LINE_FMT,
			       "  sl  "						/* 6 */
			       "local_address                         "		/* 38 */
			       "remote_address                        "		/* 38 */
			       "st tx_queue rx_queue tr tm->when retrnsmt"	/* 41 */
			       "   uid  timeout inode");			/* 21 */
										/*----*/
										/*144 */
	pos = LINE_LEN+1;
	read_lock(&udp_hash_lock);
	for (i = 0; i < UDP_HTABLE_SIZE; i++) {
		struct sock *sk;

		for (sk = udp_hash[i]; sk; sk = sk->next, num++) {
			if (sk->family != PF_INET6)
				continue;
			pos += LINE_LEN+1;
			if (pos < offset)
				continue;
			get_udp6_sock(sk, tmpbuf, i);
			len += sprintf(buffer+len, LINE_FMT, tmpbuf);
			if(len >= length)
				goto out;
		}
	}
out:
	read_unlock(&udp_hash_lock);
	begin = len - (pos - offset);
	*start = buffer + begin;
	len -= begin;
	if(len > length)
		len = length;
	if (len < 0)
		len = 0;
	return len;
}
struct proto udpv6_prot = {
	udpv6_close,			/* close */
	udpv6_connect,			/* connect */
	udp_disconnect,			/* disconnect */
	NULL,				/* accept */
	NULL,				/* retransmit */
	NULL,				/* write_wakeup */
	NULL,				/* read_wakeup */
	datagram_poll,			/* poll */
	udp_ioctl,			/* ioctl */
	NULL,				/* init */
	inet6_destroy_sock,		/* destroy */
	NULL,				/* shutdown */
	ipv6_setsockopt,		/* setsockopt */
	ipv6_getsockopt,		/* getsockopt */
	udpv6_sendmsg,			/* sendmsg */
	udpv6_recvmsg,			/* recvmsg */
	NULL,				/* bind */
	udpv6_queue_rcv_skb,		/* backlog_rcv */
	udp_v6_hash,			/* hash */
	udp_v6_unhash,			/* unhash */
	udp_v6_get_port,		/* get_port */
	128,				/* max_header */
	0,				/* retransmits */
	"UDP",				/* name */
};
void __init udpv6_init(void)
{
	inet6_add_protocol(&udpv6_protocol);
}