/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm_zone.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);
static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");
/*
 * Socket operation routines.
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
static struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO | waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
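
/*
 * Illustrative sketch (not part of this file): how an in-kernel consumer
 * might create and later close a UDP socket via socreate().  The thread
 * pointer is assumed to come from the caller's context; error handling
 * is abbreviated.
 */
#if 0
static int
example_kernel_udp_socket(struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() returns a referenced socket on success */
	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP, td);
	if (error)
		return (error);
	/* ... use the socket ... */
	return (soclose(so, 0));	/* drops the socreate() reference */
}
#endif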
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}
static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
	/* TODO: assert accept queues are empty, after unix socket is fixed */

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
				&so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
				&so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);

	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
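
/*
 * Illustrative note (not part of this file): solisten() clamps an
 * out-of-range backlog to the kern.ipc.somaxconn sysctl rather than
 * failing, so a userland listen(2) such as the following still succeeds:
 */
#if 0
	listen(s, -1);		/* so_qlimit silently becomes somaxconn */
	listen(s, 1 << 30);	/* likewise clamped to somaxconn */
#endif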
static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 * so_pcb -	The protocol stack still has a reference
 * SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}

	/* Flush accept queues, if we are accepting. */
	soqflush(so);
	sorflush(so);
	ssb_release(&so->so_snd, so);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}
static void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * (head).
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * (head_inh).
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}
static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}
static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}
static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
		    soclose_fast_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * 	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
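
/*
 * Illustrative sketch (not part of this file): a caller of sosend() must
 * treat EINTR/ERESTART as a possible partial send and consult the uio
 * residual, per the contract documented above.  The uio setup (auio) is
 * assumed to exist in the caller.
 */
#if 0
	size_t wanted = auio.uio_resid;		/* bytes we asked to send */
	error = sosend(so, NULL, &auio, NULL, NULL, 0, curthread);
	if ((error == EINTR || error == ERESTART) &&
	    auio.uio_resid != wanted)
		error = 0;	/* partial send: report the short count */
#endif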
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
				     MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto release;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;		/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
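
/*
 * Illustrative sketch (not part of this file): the effect of the prepend
 * optimization above.  max_hdr bytes are reserved at the front of the
 * single cluster, so a later M_PREPEND() in the UDP or link layer output
 * path can grow the mbuf downward in place instead of allocating a
 * separate header mbuf.  The M_PREPEND() call shown is hypothetical
 * context, not code from this file.
 */
#if 0
	/* qualifies only when everything fits in one cluster: */
	if (uio->uio_resid <= MCLBYTES - max_hdr) {
		top->m_data += max_hdr;	/* room for proto+link headers */
		/* ... later, e.g. in udp_output(): */
		M_PREPEND(top, sizeof(struct udpiphdr), M_NOWAIT);
		/* no new mbuf is allocated; m_data just moves back */
	}
#endif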
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * 	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
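
/*
 * Illustrative sketch (not part of this file): MSG_SYNC forces the
 * synchronous so_pru_send() path through sosendtcp() even when the
 * tcp_sosend_async knob is enabled, which is useful when the caller
 * needs the protocol's immediate error status.  The uio setup (auio)
 * is assumed.
 */
#if 0
	error = sosendtcp(so, NULL, &auio, NULL, NULL, MSG_SYNC, curthread);
#endif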
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * Flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
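
/*
 * Illustrative sketch (not part of this file): soreceive() can return
 * the data as an mbuf chain instead of copying through a uio by
 * supplying a sockbuf (sio); the uio then only carries the count.
 * sbinit()'s climit argument and the consumer function are assumptions
 * made for the sake of the example.
 */
#if 0
	struct sockbuf sio;
	int flags = MSG_DONTWAIT;

	sbinit(&sio, 65536);		/* climit: receive at most 64KB */
	error = soreceive(so, NULL, NULL, &sio, NULL, &flags);
	if (error == 0 && sio.sb_mb != NULL)
		process_mbuf_chain(sio.sb_mb);	/* hypothetical consumer */
#endif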
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	/*
	 * Token still held; mark the mbufs we will consume.
	 */
	restmp = 0;
	n = m;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				break;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			ssb_unlock(&so->so_rcv);
			goto restart;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}

	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
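
/*
 * Illustrative sketch (not part of this file): userland installs an
 * accept filter on a listening socket via setsockopt(2), which lands in
 * do_setopt_accept_filter() above.  The "dataready" filter name assumes
 * the accf_data module is available.
 */
#if 0
	struct accept_filter_arg afa;

	bzero(&afa, sizeof(afa));
	strcpy(afa.af_name, "dataready");
	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
#endif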
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
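
/*
 * Illustrative sketch (not part of this file): a protocol pr_ctloutput()
 * routine fetching a fixed-size integer option with sooptcopyin(), as
 * described above.  The option name and surrounding switch are
 * hypothetical.
 */
#if 0
	case EXAMPLE_OPT:
		error = sooptcopyin(sopt, &optval, sizeof(optval),
				    sizeof(optval));
		if (error)
			break;
		/* optval now holds the user-supplied integer */
		break;
#endif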
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
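
/*
 * Illustrative sketch (not part of this file): a userland EVFILT_READ
 * registration that reaches sokqfilter() above; on a listening socket
 * the solisten_filtops path reports pending connections in kn_data.
 */
#if 0
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
#endif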
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}