/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);
static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
static struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO | waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}
	*aso = so;
	return (0);
}
/*
 * NOTE: Returns referenced socket.
 */

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}
static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);

	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	crfree(so->so_cred);
	kfree(so, M_SOCKET);
}
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 * so_pcb -	The protocol stack still has a reference
 * SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}

	/* Flush accept queues, if we are accepting. */
	soqflush(so);
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER) &&
	     so->so_linger != 0)) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}
void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * (head).
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * (head_inh).
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}
static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
				    "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}
static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto skip;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
skip:
	sodiscard(so);
	sofree(so);
}

static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
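/*
 * Illustrative sketch (not part of this file): an in-kernel caller
 * typically describes the data with a uio and lets sosend() handle
 * buffering, blocking and protocol dispatch.  The names "myiov" and
 * "myuio" below are hypothetical.
 *
 *	struct iovec myiov = { .iov_base = data, .iov_len = datalen };
 *	struct uio myuio;
 *
 *	myuio.uio_iov = &myiov;
 *	myuio.uio_iovcnt = 1;
 *	myuio.uio_offset = 0;
 *	myuio.uio_resid = datalen;
 *	myuio.uio_segflg = UIO_SYSSPACE;
 *	myuio.uio_rw = UIO_WRITE;
 *	myuio.uio_td = curthread;
 *	error = sosend(so, NULL, &myuio, NULL, NULL, 0, curthread);
 */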
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *
 *   UDP has no out-of-band data.
 *
 *   UDP has no control data.
 *
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   header.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
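		/*
		 * Worked example (illustrative): with MCLBYTES = 2048 and,
		 * say, max_hdr = 88 bytes of worst-case protocol+link
		 * headers, datagrams of up to 2048 - 88 = 1960 bytes take
		 * the prepend-optimized path below; anything larger falls
		 * back to m_uiomove().  The actual max_hdr value depends
		 * on the configured protocols.
		 */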
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
				     MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;		/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
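/*
 * Layout sketch of one such record in the receive sockbuf (illustrative):
 *
 *	[MT_SONAME] -> [MT_CONTROL] ... [MT_CONTROL] -> [MT_DATA] ... [MT_DATA]
 *	  (address)    (ancillary data, e.g. SCM_RIGHTS)       (payload)
 *
 * soreceive() consumes the pieces in exactly this order below: address
 * first (PR_ADDR protocols), then any control mbufs, then the data.
 */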
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code, so do not do this in the loop.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
					    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		    resid > 0 && !sosendallatonce(so) &&
		    so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwords.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m) {
				goto dontblock;
			} else {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				goto release;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	/*
	 * Pre-lock the mbufs we intend to consume so the protocol
	 * thread can continue to append data while we copy it out
	 * without holding the token.
	 */
	restmp = 0;
	n = m;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code, so do not do this in the loop.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				break;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto restart;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}

	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
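			/*
			 * Worked example (illustrative): with hz = 100,
			 * ustick is 1000000 / hz = 10000 microseconds per
			 * tick, so tv = { 2, 500000 } converts to
			 * 2 * 100 + 500000 / 10000 = 250 ticks.
			 */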
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);

	/*
	 * There is no need to use NOTE_OOB as KNOTE hint here:
	 * soread filter depends on so_oobmark and SS_RCVATMARK
	 * so_state.  NOTE_OOB would cause unnecessary penalty
	 * in KNOTE, if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}