/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);
static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
static struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}
int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}
	*aso = so;
	return (0);
}
/*
 * NOTE: Returns referenced socket.
 */
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}
static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
			("so_incomp is not empty"));
	}

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
				&so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
				&so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);

	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
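
/*
 * Illustrative sketch (not part of the original file): the clamp above means
 * an out-of-range listen(2) backlog is silently replaced with
 * kern.ipc.somaxconn rather than rejected:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	// ...bind(fd, ...)...
 *	listen(fd, 1000000);	// so_qlimit ends up at somaxconn
 */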
static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 * so_pcb -	The protocol stack still has a reference
 * SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}

	/* Flush accept queues, if we are accepting. */
	soqflush(so);
	sorflush(so);
	ssb_release(&so->so_snd, so);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}
void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
		("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
		("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}
static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so); /* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error2)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}
static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
discard:
	sodiscard(so);
	sofree(so);
}
static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
		    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	     (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (resid > INT_MAX)
				resid = INT_MAX;
			m = m_getl((int)resid, M_WAITOK, MT_DATA,
				   top == NULL ? M_PKTHDR : 0, &mlen);
			if (top == NULL) {
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			}
			len = imin((int)szmin(mlen, resid), space);
			if (resid < MINCLSIZE) {
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == NULL && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid == 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    if (flags & MSG_OOB) {
			    pru_flags = PRUS_OOB;
		    } else if ((flags & MSG_EOF) &&
			       (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			       (resid == 0)) {
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag and nothing left to
			     * send then use PRU_SEND_EOF instead of PRU_SEND.
			     */
			    pru_flags = PRUS_EOF;
		    } else if (resid > 0 && space > 0) {
			    /* If there is more to send, set PRUS_MORETOCOME */
			    pru_flags = PRUS_MORETOCOME;
		    } else {
			    pru_flags = 0;
		    }
		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the splnet() protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = so_pru_send(so, pru_flags, top, addr, control, td);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = NULL;
		    top = NULL;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
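
/*
 * Illustrative sketch (not part of the original file): per the contract
 * described above sosend(), a caller that can observe EINTR/ERESTART must
 * check for a short transfer instead of assuming all-or-nothing.
 * Hypothetical kernel caller, assuming a locally built uio "auio" of
 * original length "len":
 *
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 *	if ((error == EINTR || error == ERESTART) &&
 *	    auio.uio_resid != len)
 *		error = 0;	// part of the data was delivered
 */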
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *
 *	so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	  sosendallatonce() returns true,
 *	  the "atomic" variable is true,
 *	  and sosendudp() blocks until space is available for the entire send.
 *	so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	  PR_IMPLOPCL flags set.
 *	UDP has no out-of-band data.
 *	UDP has no control data.
 *	UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
				     MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
				("sosendudp invalid nsize %d, "
				 "resid %zu, hdrlen %d",
				 nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;		/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
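
/*
 * Illustrative arithmetic for the prepend optimization above (typical
 * values, not normative): with MCLBYTES = 2048 and max_hdr a few dozen
 * bytes, a datagram of at most MCLBYTES - max_hdr bytes is copied into a
 * single cluster with max_hdr bytes reserved at the front, so udp_output()
 * and the link layer can M_PREPEND() their headers without allocating
 * another mbuf.  Anything larger takes the plain m_uiomove() path and may
 * be fragmented anyway.
 */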
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
		    int cnt = 0, async = 0;

		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
		    } else do {
			if (resid > INT_MAX)
				resid = INT_MAX;
			if (tcp_sosend_jcluster) {
				m = m_getlj((int)resid, M_WAITOK, MT_DATA,
					    top == NULL ? M_PKTHDR : 0, &mlen);
			} else {
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
			}
			if (top == NULL) {
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			}
			len = imin((int)szmin(mlen, resid), space);
			space -= len;
			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid == 0)
				break;
			++cnt;
		    } while (space > 0 && cnt < tcp_sosend_agglim);

		    if (tcp_sosend_async)
			    async = 1;

		    if (flags & MSG_OOB) {
			    pru_flags = PRUS_OOB;
			    async = 0;
		    } else if ((flags & MSG_EOF) && resid == 0) {
			    pru_flags = PRUS_EOF;
		    } else if (resid > 0 && space > 0) {
			    /* If there is more to send, set PRUS_MORETOCOME */
			    pru_flags = PRUS_MORETOCOME;
			    async = 1;
		    } else {
			    pru_flags = 0;
		    }

		    if (flags & MSG_SYNC)
			    async = 0;

		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the splnet() protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    for (m = top; m; m = m->m_next)
			    ssb_preallocstream(&so->so_snd, m);
		    if (!async) {
			    error = so_pru_send(so, pru_flags, top,
						NULL, NULL, td);
		    } else {
			    so_pru_send_async(so, pru_flags, top,
					      NULL, NULL, td);
			    error = 0;
		    }

		    top = NULL;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
						(m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
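
/*
 * Illustrative sketch (not part of the original file): the MSG_WAITALL
 * handling above is what gives userland recv(2) its fill-the-buffer
 * behavior on stream sockets:
 *
 *	char buf[4096];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_WAITALL);
 *	// n == 4096 unless EOF, a signal, or an error cut it short
 */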
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m == NULL) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				goto release;
			}
			goto dontblock;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	/*
	 * Lock the mbufs we will be copying out of (M_SOLOCKED), up to
	 * the requested resid.
	 */
	restmp = 0;
	n = m;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				break;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}

	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
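
/*
 * Illustrative sketch (not part of the original file): a protocol's
 * pr_ctloutput() handler consumes option values exactly as described
 * above.  Hypothetical SOPT_SET case for an integer option:
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval),
 *			    sizeof(optval));
 *	if (error)
 *		return (error);
 *	// validate optval, then apply it to the protocol control block
 */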
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
		goto bad;
	}
	switch (sopt->sopt_name) {
	case SO_ACCEPTFILTER:
		error = do_setopt_accept_filter(so, sopt);
		if (error)
			goto bad;
		break;

	case SO_LINGER:
		error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
		if (error)
			goto bad;

		so->so_linger = l.l_linger;
		if (l.l_onoff)
			so->so_options |= SO_LINGER;
		else
			so->so_options &= ~SO_LINGER;
		break;

	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_DONTROUTE:
	case SO_USELOOPBACK:
	case SO_BROADCAST:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
		error = sooptcopyin(sopt, &optval, sizeof optval,
				    sizeof optval);
		if (error)
			goto bad;
		if (optval)
			so->so_options |= sopt->sopt_name;
		else
			so->so_options &= ~sopt->sopt_name;
		break;

	case SO_SNDBUF:
	case SO_RCVBUF:
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		error = sooptcopyin(sopt, &optval, sizeof optval,
				    sizeof optval);
		if (error)
			goto bad;

		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		if (optval < 1) {
			error = EINVAL;
			goto bad;
		}

		switch (sopt->sopt_name) {
		case SO_SNDBUF:
		case SO_RCVBUF:
			if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
					&so->so_snd : &so->so_rcv, (u_long)optval,
					so,
					&curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
				error = ENOBUFS;
				goto bad;
			}
			sotmp = (sopt->sopt_name == SO_SNDBUF) ?
				&so->so_snd : &so->so_rcv;
			atomic_clear_int(&sotmp->ssb_flags,
					 SSB_AUTOSIZE);
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			so->so_snd.ssb_lowat =
			    (optval > so->so_snd.ssb_hiwat) ?
			    so->so_snd.ssb_hiwat : optval;
			atomic_clear_int(&so->so_snd.ssb_flags,
					 SSB_AUTOLOWAT);
			break;
		case SO_RCVLOWAT:
			so->so_rcv.ssb_lowat =
			    (optval > so->so_rcv.ssb_hiwat) ?
			    so->so_rcv.ssb_hiwat : optval;
			atomic_clear_int(&so->so_rcv.ssb_flags,
					 SSB_AUTOLOWAT);
			break;
		}
		break;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
		if (error)
			goto bad;

		/* assert(hz > 0); */
		if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
		    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
			error = EDOM;
			goto bad;
		}
		/* assert(tick > 0); */
		/* assert(ULONG_MAX - INT_MAX >= 1000000); */
		val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
		if (val > INT_MAX) {
			error = EDOM;
			goto bad;
		}
		if (val == 0 && tv.tv_usec != 0)
			val = 1;

		switch (sopt->sopt_name) {
		case SO_SNDTIMEO:
			so->so_snd.ssb_timeo = val;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.ssb_timeo = val;
			break;
		}
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}
	if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
		(void) so_pr_ctloutput(so, sopt);
	}
bad:
	return (error);
}
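
/*
 * Illustrative sketch (not part of the original file): the SO_LINGER case
 * above is reached by an ordinary userland setsockopt(2):
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *
 * With SO_LINGER set on a connected socket, soclose() chooses the
 * soclose_sync() path and may sleep in tsleep() for up to l_linger
 * seconds waiting for the disconnect to complete.
 */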
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		return (ENOPROTOOPT);
	}
	switch (sopt->sopt_name) {
	case SO_ACCEPTFILTER:
		if ((so->so_options & SO_ACCEPTCONN) == 0)
			return (EINVAL);
		afap = kmalloc(sizeof(*afap), M_TEMP,
			       M_WAITOK | M_ZERO);
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
			if (so->so_accf->so_accept_filter_str != NULL)
				strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
		}
		error = sooptcopyout(sopt, afap, sizeof(*afap));
		kfree(afap, M_TEMP);
		break;

	case SO_LINGER:
		l.l_onoff = so->so_options & SO_LINGER;
		l.l_linger = so->so_linger;
		error = sooptcopyout(sopt, &l, sizeof l);
		break;

	case SO_USELOOPBACK:
	case SO_DONTROUTE:
	case SO_DEBUG:
	case SO_KEEPALIVE:
	case SO_REUSEADDR:
	case SO_REUSEPORT:
	case SO_BROADCAST:
	case SO_OOBINLINE:
	case SO_TIMESTAMP:
	case SO_NOSIGPIPE:
		optval = so->so_options & sopt->sopt_name;
integer:
		error = sooptcopyout(sopt, &optval, sizeof optval);
		break;

	case SO_TYPE:
		optval = so->so_type;
		goto integer;

	case SO_ERROR:
		optval = so->so_error;
		so->so_error = 0;
		goto integer;

	case SO_SNDBUF:
		optval = so->so_snd.ssb_hiwat;
		goto integer;

	case SO_RCVBUF:
		optval = so->so_rcv.ssb_hiwat;
		goto integer;

	case SO_SNDLOWAT:
		optval = so->so_snd.ssb_lowat;
		goto integer;

	case SO_RCVLOWAT:
		optval = so->so_rcv.ssb_lowat;
		goto integer;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		optval = (sopt->sopt_name == SO_SNDTIMEO ?
			  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * ustick;
		error = sooptcopyout(sopt, &tv, sizeof tv);
		break;

	case SO_SNDSPACE:
		optval_l = ssb_space(&so->so_snd);
		error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
		break;

	case SO_CPUHINT:
		optval = -1; /* no hint */
		goto integer;

	default:
		error = ENOPROTOOPT;
		break;
	}
	if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
		so_pr_ctloutput(so, sopt);
	return (error);
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should be allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	/*
	 * There is no need to use NOTE_OOB as KNOTE hint here:
	 * soread filter depends on so_oobmark and SS_RCVATMARK
	 * so_state.  NOTE_OOB would cause unnecessary penalty
	 * in KNOTE, if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}
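
/*
 * Illustrative sketch (not part of the original file): for a listen
 * socket, EVFILT_READ is backed by the filter above, so kevent(2) fires
 * when a completed connection is queued and kn_data reports the backlog
 * length (capped by kern.ipc.soavailconn):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, lsock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// later, when kevent() returns: kev.data ~ pending accepts
 */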