/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"
/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

static int udp_port_rover;
/*
 * Note about this hash function:
 * typical use is probably daddr = 0, so only dport is going to vary the hash.
 */
static inline unsigned int hash_port_and_addr(__u16 port, __be32 addr)
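/*
 * The hash body itself is not shown above.  A minimal user-space sketch of
 * such a port/address mixer, assuming a simple XOR-and-fold scheme (kept
 * under #if 0; illustrative only, not necessarily the body used here):
 */
#if 0
static unsigned int sketch_hash_port_and_addr(unsigned short port,
					      unsigned int addr)
{
	unsigned int hash = port ^ addr;	/* mix port into address */

	hash ^= hash >> 16;			/* fold high half down */
	hash ^= hash >> 8;			/* spread into low byte */
	return hash;	/* caller masks with UDP_HTABLE_SIZE - 1 */
}
#endif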
static inline int __udp_lib_port_inuse(unsigned int hash, int port,
				       __be32 daddr, struct hlist_head udptable[])
{
	struct sock *sk;
	struct hlist_node *node;
	struct inet_sock *inet;

	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
		if (sk->sk_hash != hash)
			continue;
		inet = inet_sk(sk);
		if (inet->num != port)
			continue;
		if (inet->rcv_saddr == daddr)
			return 1;
	}
	return 0;
}
/**
 *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
 *  @port_rover:  pointer to record of last unallocated port
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
		       struct hlist_head udptable[], int *port_rover,
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2))
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct sock *sk2;
	unsigned int hash;
	int error = 1;

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (*port_rover > sysctl_local_port_range[1] ||
		    *port_rover < sysctl_local_port_range[0])
			*port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = *port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			int size;

			hash = hash_port_and_addr(result,
						  inet_sk(sk)->rcv_saddr);
			head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(head)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
					    ((result - sysctl_local_port_range[0]) &
					     (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, head) {
				if (++size >= best_size_so_far)
					goto next;
			}
			best_size_so_far = size;
			best = result;
		next:
			;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
		     i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			hash = hash_port_and_addr(result, 0);
			if (__udp_lib_port_inuse(hash, result,
						 0, udptable))
				continue;
			if (!inet_sk(sk)->rcv_saddr)
				break;
			hash = hash_port_and_addr(result,
						  inet_sk(sk)->rcv_saddr);
			if (! __udp_lib_port_inuse(hash, result,
						   inet_sk(sk)->rcv_saddr, udptable))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		*port_rover = snum = result;
	} else {
		hash = hash_port_and_addr(snum, 0);
		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];

		sk_for_each(sk2, node, head)
			if (sk2->sk_hash == hash &&
			    sk2 != sk &&
			    inet_sk(sk2)->num == snum &&
			    (!sk2->sk_reuse || !sk->sk_reuse) &&
			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (*saddr_comp)(sk, sk2))
				goto fail;

		if (inet_sk(sk)->rcv_saddr) {
			hash = hash_port_and_addr(snum,
						  inet_sk(sk)->rcv_saddr);
			head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];

			sk_for_each(sk2, node, head)
				if (sk2->sk_hash == hash &&
				    sk2 != sk &&
				    inet_sk(sk2)->num == snum &&
				    (!sk2->sk_reuse || !sk->sk_reuse) &&
				    (!sk2->sk_bound_dev_if ||
				     !sk->sk_bound_dev_if ||
				     sk2->sk_bound_dev_if ==
				     sk->sk_bound_dev_if) &&
				    (*saddr_comp)(sk, sk2))
					goto fail;
		}
	}
	inet_sk(sk)->num = snum;
	sk->sk_hash = hash;
	if (sk_unhashed(sk)) {
		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
		sk_add_node(sk, head);
		sock_prot_inc_use(sk->sk_prot);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*scmp)(const struct sock *, const struct sock *))
{
	return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
}
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return ( !ipv6_only_sock(sk2) &&
		 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
		  inet1->rcv_saddr == inet2->rcv_saddr));
}
static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
				      __be32 daddr, __be16 dport,
				      int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned int hash, hashwild;
	int score, best = -1, hport = ntohs(dport);

	hash = hash_port_and_addr(hport, daddr);
	hashwild = hash_port_and_addr(hport, 0);

	read_lock(&udp_hash_lock);

lookup:

	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_hash != hash || ipv6_only_sock(sk) ||
		    inet->num != hport)
			continue;

		score = (sk->sk_family == PF_INET ? 1 : 0);
		if (inet->rcv_saddr) {
			if (inet->rcv_saddr != daddr)
				continue;
			score += 2;
		}
		if (inet->daddr) {
			if (inet->daddr != saddr)
				continue;
			score += 2;
		}
		if (inet->dport) {
			if (inet->dport != sport)
				continue;
			score += 2;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				continue;
			score += 2;
		}
		if (score == 9) {
			result = sk;
			goto found;
		} else if (score > best) {
			result = sk;
			best = score;
		}
	}

	if (hash != hashwild) {
		hash = hashwild;
		goto lookup;
	}
found:
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}
static inline struct sock *udp_v4_mcast_next(struct sock *sk, unsigned int hnum,
					     int hport, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (s->sk_hash != hnum ||
		    inet->num != hport ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
			       skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}
void udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, udp_hash);
}
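/*
 * A hedged user-space sketch (under #if 0) of how the ip_icmp_error()
 * branch above surfaces to applications: with IP_RECVERR enabled, queued
 * ICMP errors are drained with recvmsg(MSG_ERRQUEUE).
 */
#if 0
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void drain_error_queue(int udp_fd)
{
	char buf[256], cbuf[256];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg = { 0 };
	int on = 1;

	setsockopt(udp_fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	/* ... after a send that drew e.g. ICMP port-unreachable ... */
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;		/* sock_extended_err arrives here */
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(udp_fd, &msg, MSG_ERRQUEUE) < 0)
		perror("recvmsg");	/* EAGAIN when no error is queued */
}
#endif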
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
/**
 * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 * 	@sk:	socket we are sending on
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
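/*
 * The csum helpers above all compute the RFC 768 one's-complement sum
 * over pseudo-header plus payload.  A self-contained user-space sketch
 * of the summing and folding steps (under #if 0):
 */
#if 0
#include <stddef.h>
#include <stdint.h>

/* Sum a buffer into the 16-bit one's-complement checksum UDP uses. */
static uint16_t ones_complement_sum(const void *data, size_t len,
				    uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];	/* 16-bit words */
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;		/* pad odd byte */
	while (sum >> 16)				/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;				/* one's complement */
}
#endif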
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (up->pcflag)					 /*     UDP-Lite      */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
		goto send;

	} else						 /* `normal' UDP      */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = up->pcflag;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(&rt, &fl, sk, 1);
		if (err)
			goto out;

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
		return len;
	}
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
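/*
 * A user-space sketch (under #if 0) of the corking logic above: MSG_MORE
 * (or the UDP_CORK option) keeps appending to the pending frame so several
 * sends leave the host as a single datagram.
 */
#if 0
#include <sys/socket.h>

static void send_one_datagram_in_two_parts(int udp_fd)	/* connected fd */
{
	send(udp_fd, "part1-", 6, MSG_MORE);	/* corked: appended only */
	send(udp_fd, "part2", 5, 0);		/* pushes one 11-byte datagram */
}
#endif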
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
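/*
 * A user-space sketch (under #if 0) of SIOCINQ above; on Linux sockets
 * FIONREAD maps to the same request, and it reports the payload length
 * of the next queued datagram, not the total queue size.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>

static void show_next_datagram_size(int udp_fd)
{
	int pending = 0;

	if (ioctl(udp_fd, FIONREAD, &pending) == 0)
		printf("next datagram payload: %d bytes\n", pending);
}
#endif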
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int err;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin)
	{
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);

	skb_kill_datagram(sk, skb, flags);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}
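/*
 * A user-space sketch (under #if 0) of the MSG_TRUNC handling above: on
 * Linux, passing MSG_TRUNC to recv() on a datagram socket returns the
 * real datagram length even when the buffer was smaller.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void read_with_trunc(int udp_fd)
{
	char buf[64];
	ssize_t n = recv(udp_fd, buf, sizeof(buf), MSG_TRUNC);

	if (n > (ssize_t)sizeof(buf))
		printf("datagram was %zd bytes; kept first %zu\n",
		       n, sizeof(buf));
}
#endif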
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
/* return:
 * 	1  if the UDP system should process it
 *	0  if we should drop this packet
 * 	-1 if it should get processed by xfrm4_rcv_encap
 */
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
#ifndef CONFIG_XFRM
	return 1;
#else
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata;
	__be32 *udpdata32;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	len = skb->len - sizeof(struct udphdr);
	if (len <= 0)
		return 1;

	/* if this is not encapsulated socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {

			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return 0;

	/* Now we can update and verify the packet length... */
	iph = ip_hdr(skb);
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return 0;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	__skb_pull(skb, len);
	skb_reset_transport_header(skb);

	/* modify the protocol (it's ESP!) */
	iph->protocol = IPPROTO_ESP;

	/* and let the caller know to send this into the ESP processor... */
	return -1;
#endif
}
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		int ret;

		ret = udp_encap_rcv(sk, skb);
		if (ret == 0) {
			/* Eat the packet .. */
			kfree_skb(skb);
			return 0;
		}
		if (ret < 0) {
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
			return -ret;
		}
		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
		goto drop;
	}

	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
	return 0;

drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
	kfree_skb(skb);
	return -1;
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk, *skw, *sknext;
	int dif;
	int hport = ntohs(uh->dest);
	unsigned int hash = hash_port_and_addr(hport, daddr);
	unsigned int hashwild = hash_port_and_addr(hport, 0);

	dif = skb->dev->ifindex;

	read_lock(&udp_hash_lock);

	sk = sk_head(&udptable[hash & (UDP_HTABLE_SIZE - 1)]);
	skw = sk_head(&udptable[hashwild & (UDP_HTABLE_SIZE - 1)]);

	sk = udp_v4_mcast_next(sk, hash, hport, daddr, uh->source, saddr, dif);
	if (!sk) {
		hash = hashwild;
		sk = udp_v4_mcast_next(skw, hash, hport, daddr, uh->source,
				       saddr, dif);
	}
	if (sk) {
		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), hash, hport,
						   daddr, uh->source, saddr,
						   dif);
			if (!sknext && hash != hashwild) {
				hash = hashwild;
				sknext = udp_v4_mcast_next(skw, hash, hport,
					daddr, uh->source, saddr, dif);
			}
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/*
					 * we should probably re-process
					 * instead of dropping packets here.
					 */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */
	return 0;
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh = udp_hdr(skb);
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;

	/*
	 *	Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);

	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
			       skb->dev->ifindex, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       NIPQUAD(daddr),
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
}
int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!up->pcflag)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!up->pcflag)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
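/*
 * A user-space sketch (under #if 0) of driving the UDP-Lite options
 * above, per RFC 3828.  The fallback constants mirror linux/udp.h
 * (SOL_UDPLITE and IPPROTO_UDPLITE are both 136); treat them as
 * assumptions if your headers differ.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef SOL_UDPLITE
#define SOL_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10
#define UDPLITE_RECV_CSCOV	11
#endif

static int make_udplite_socket(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cov = 20;	/* cover the 8-byte header + first 12 payload bytes */

	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
	return fd;
}
#endif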
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 * 	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select indicating data available, but then block when reading
 *	it.  Add special case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ( (mask & POLLRDNORM) &&
	     !(file->f_flags & O_NONBLOCK) &&
	     !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
			__skb_unlink(skb, rcvq);
			kfree_skb(skb);
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}
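/*
 * A user-space sketch (under #if 0) of the pattern udp_poll() protects:
 * poll() reporting readable followed by a blocking read.  Without the
 * checksum scrub above, a corrupt datagram could make the recv() below
 * block after a successful poll().
 */
#if 0
#include <poll.h>
#include <sys/socket.h>

static ssize_t poll_then_recv(int udp_fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = udp_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		return recv(udp_fd, buf, len, 0);
	return -1;
}
#endif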
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.obj_size	   = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}
static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	s->family		= afinfo->family;
	s->hashtable		= afinfo->hashtable;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src  = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}
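/*
 * A user-space sketch (under #if 0): the rows formatted above appear in
 * /proc/net/udp as hex address:port pairs; "st" is the sock state
 * (07 == TCP_CLOSE, the idle state UDP sockets keep).
 */
#if 0
#include <stdio.h>

static void dump_proc_net_udp(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/udp", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "  0: 00000000:0035 ..." */
	fclose(f);
}
#endif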
/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};
int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_get_port);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif