/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/vm_zone.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);
static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
static struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if the protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
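
/*
 * Illustrative sketch (not part of this file): a minimal in-kernel
 * consumer of socreate().  The error handling mirrors the comment in
 * socreate() above -- the caller owns the single reference returned and
 * must eventually drop it via soclose() if the socket cannot be
 * connected up to a descriptor.  The function name is hypothetical.
 */
#if 0
static int
example_kern_udp_socket(struct thread *td, struct socket **sop)
{
	struct socket *so;
	int error;

	/* PF_INET/SOCK_DGRAM resolves to the UDP protosw via pffindtype() */
	error = socreate(PF_INET, &so, SOCK_DGRAM, 0, td);
	if (error)
		return (error);
	*sop = so;		/* caller eventually does soclose(so, 0) */
	return (0);
}
#endif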
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}
static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
		    ("so_incomp is not empty"));
	}

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	crfree(so->so_cred);
	kfree(so, M_SOCKET);
}
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
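
/*
 * Illustrative sketch (not part of this file): a kernel-side listen
 * setup.  As solisten() above shows, a negative or oversized backlog is
 * silently clamped to somaxconn (tunable via kern.ipc.somaxconn) before
 * so_qlimit is set, so callers need not range-check it themselves.
 * The function name is hypothetical.
 */
#if 0
static int
example_kern_listen(struct socket *so, struct thread *td)
{
	return (solisten(so, 128, td));
}
#endif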
void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}

	/* Flush accept queues, if we are accepting. */
	soqflush(so);
	sorflush(so);
	ssb_release(&so->so_snd, so);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return (error);
}
void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket's
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket's
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}
static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so);	/* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}
static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */
}
static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
		    soclose_fast_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
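
/*
 * Illustrative sketch (not part of this file): driving sosend() with a
 * kernel uio.  The iovec/uio setup shown here is an assumption about a
 * hypothetical caller, not something sosend() does itself.  Per the
 * comment above sosend(), the "top" and "control" mbufs are consumed on
 * both success and failure, so a uio-based caller has nothing to free.
 */
#if 0
static int
example_kern_send(struct socket *so, void *data, size_t len,
		  struct thread *td)
{
	struct iovec aiov;
	struct uio auio;

	aiov.iov_base = data;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/* no destination address, no control data, no flags */
	return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif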
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   header.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
				     MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;		/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
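
/*
 * Illustrative sketch (not part of this file): with the
 * kern.ipc.udp_sosend_async tunable enabled, sosendudp() above fires
 * the PRU send without waiting for the protocol thread and always
 * reports success.  A caller that needs the protocol's actual error
 * code can force the synchronous path with MSG_SYNC.  The function name
 * is hypothetical.
 */
#if 0
static int
example_udp_send_sync(struct socket *so, struct sockaddr *sa,
		      struct uio *uio, struct thread *td)
{
	/* MSG_SYNC bypasses the asynchronous so_pru_send_async() path */
	return (sosendudp(so, sa, uio, NULL, NULL, MSG_SYNC, td));
}
#endif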
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
						    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
						  NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
					    (m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
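
/*
 * Illustrative sketch (not part of this file): a blocking kernel read
 * through soreceive() using a uio.  Passing a struct sockbuf *sio
 * instead of the uio would return the data as an mbuf chain, with the
 * uio used only for its resid count, as the comment above soreceive()
 * describes.  The function name is hypothetical.
 */
#if 0
static int
example_kern_recv(struct socket *so, void *data, size_t len,
		  struct thread *td)
{
	struct iovec aiov;
	struct uio auio;
	int flags = 0;

	aiov.iov_base = data;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	/* no source address, no control data wanted */
	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}
#endif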
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwords.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				break;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
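
/*
 * Illustrative sketch (not part of this file): how a protocol-level
 * setsockopt handler typically consumes sooptcopyin(), per the comment
 * above.  The handler and the validation rule are hypothetical; only
 * the sooptcopyin() call pattern is taken from this file.
 */
#if 0
static int
example_ctloutput_set(struct sockopt *sopt)
{
	int optval, error;

	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
	if (optval < 0)
		return (EINVAL);
	/* ... apply optval to the protocol control block ... */
	return (0);
}
#endif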
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	long optval_l;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* the chain should have been allocated large enough by ip6_sooptmcopyin() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
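
/*
 * Illustrative sketch (not part of this file): the mbuf-based helpers
 * above exist so the IPv6 code can shuttle option data between a
 * sockopt and an mbuf chain; a minimal round trip under that assumption
 * looks like this.  The function name is hypothetical.
 */
#if 0
static int
example_soopt_roundtrip(struct sockopt *sopt)
{
	struct mbuf *m;
	int error;

	error = soopt_getm(sopt, &m);	 /* size chain from sopt_valsize */
	if (error)
		return (error);
	soopt_to_mbuf(sopt, m);		 /* copy option data into the chain */
	error = soopt_mcopyout(sopt, m); /* and back out of the chain */
	if (error == 0)
		m_freem(m);		 /* freed internally only on error */
	return (error);
}
#endif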
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);

	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as KNOTE hint here:
	 * soread filter depends on so_oobmark and SS_RCVATMARK
	 * so_state.  NOTE_OOB would cause unnecessary penalty
	 * in KNOTE, if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}