/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/vm_zone.h>

#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);
static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept predication");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronized pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronized pru_connect");

static int use_socreate_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, socreate_fast, CTLFLAG_RW,
    &use_socreate_fast, 0, "Fast socket creation");

static int soavailconn = 32;
SYSCTL_INT(_kern_ipc, OID_AUTO, soavailconn, CTLFLAG_RW,
    &soavailconn, 0, "Maximum available socket connection queue size");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
static struct socket *
soalloc(int waitok, struct protosw *pr)
{
	globaldata_t gd = mycpu;
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_mlist);
		TAILQ_INIT(&so->so_snd.ssb_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
		so->so_inum = gd->gd_anoninum++ * ncpus + gd->gd_cpuid + 2;
	}
	return so;
}
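
/*
 * The so_inum assignment above hands out anonymous inode numbers that are
 * unique across cpus without locking: each cpu strides by ncpus from its
 * own offset.  For example, on a 4-cpu system cpu 1 generates
 * 0*4+1+2 = 3, then 7, 11, ... while cpu 2 generates 4, 8, 12, ...
 */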
int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	struct prison *pr = p->p_ucred->cr_prison;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (pr && PRISON_CAP_ISSET(pr->pr_caps, PRISON_CAP_NET_UNIXIPROUTE) &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	if (use_socreate_fast && prp->pr_usrreqs->pru_preattach)
		error = so_pru_attach_fast(so, proto, &ai);
	else
		error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}
	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}
static void
sodealloc(struct socket *so)
{
	KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);

	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT(TAILQ_EMPTY(&so->so_comp), ("so_comp is not empty"));
		KASSERT(TAILQ_EMPTY(&so->so_incomp),
			("so_incomp is not empty"));
	}

	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);

	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}
static void
soqflush(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
static void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	if (head != NULL) {
		/*
		 * We're done, remove ourselves from the accept queue we are
		 * on, if we are on one.
		 */
		if (so->so_state & SS_INCOMP) {
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
			    SS_COMP);
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}

	/* Flush accept queues, if we are accepting. */
	soqflush(so);

	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER) &&
	     so->so_linger != 0)) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}
void
sodiscard(struct socket *so)
{
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}
/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);
		KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);

		/*
		 * Remove this socket from the current listen socket
		 * (head).
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * (head_inh).
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}
static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
		so_pru_sync(so); /* unpend async prus */

	if (so->so_pcb == NULL)
		goto discard;

	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	sofree(so);		/* dispose of ref */

	return (error);
}
static void
soclose_fast_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb == NULL)
		goto skip;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}
skip:
	sodiscard(so);
	sofree(so);
}
static void
soclose_fast(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_fast_handler);
	if (so->so_port == netisr_curport())
		lwkt_sendmsg_oncpu(so->so_port, &base->lmsg);
	else
		lwkt_sendmsg(so->so_port, &base->lmsg);
}
/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_direct(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
	  boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
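
/*
 * SBLOCKWAIT() simply translates the caller's MSG_DONTWAIT into the
 * malloc-style wait flag that ssb_lock() expects, e.g.:
 *
 *	error = ssb_lock(&so->so_snd, SBLOCKWAIT(MSG_DONTWAIT)); /- M_NOWAIT -/
 *	error = ssb_lock(&so->so_snd, SBLOCKWAIT(0));            /- M_WAITOK -/
 */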
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags,
       struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   header.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto release;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;		/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
/*
 * A specialization of sosend() for TCP.
 */
int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags,
	  struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 *	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
						    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
						  NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;
	boolean_t free_rights = FALSE;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error || so->so_rerror) {
			if (m)
				goto dontblock;
			if (so->so_error)
				error = so->so_error;
			else
				error = so->so_rerror;
			if ((flags & MSG_PEEK) == 0) {
				if (so->so_error)
					so->so_error = 0;
				else
					so->so_rerror = 0;
			}
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			const struct cmsghdr *cm = mtod(m, struct cmsghdr *);

			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					error = pr->pr_domain->dom_externalize
						(m, flags);
				}
				*controlp = m;
				m = n;
			} else {
				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS)
					free_rights = TRUE;
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else {
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		}
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_rerror ||
			    so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain) {
		if (free_rights && (pr->pr_flags & PR_RIGHTS) &&
		    pr->pr_domain->dom_dispose)
			pr->pr_domain->dom_dispose(free_chain);
		m_freem(free_chain);
	}
	return (error);
}
/*
 * A specialization of soreceive() for TCP.
 */
int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	/*
	 * Lock the mbufs we will be copying from, limited by resid.
	 */
	restmp = 0;
	n = m;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			break;
		n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * code paths.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		    ("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto restart;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg	*afap = NULL;
	struct accept_filter	*afp;
	struct so_accf	*af = so->so_accf;
	int	error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long  val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
						&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			if (so->so_error) {
				optval = so->so_error;
				so->so_error = 0;
			} else {
				optval = so->so_rerror;
				so->so_rerror = 0;
			}
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should be allocated enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);

	/*
	 * NOTE:
	 * There is no need to use NOTE_OOB as KNOTE hint here:
	 * soread filter depends on so_oobmark and SS_RCVATMARK
	 * so_state.  NOTE_OOB would cause unnecessary penalty
	 * in KNOTE, if there was knote processing contention.
	 */
	KNOTE(&so->so_rcv.ssb_kq.ki_note, 0);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
static int
filt_soread(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		if (so->so_state & SS_CANTSENDMORE)
			kn->kn_flags |= EV_HUP;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error || so->so_rerror)
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}
static int
filt_sowrite(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (so->so_snd.ssb_flags & SSB_PREALLOC)
		kn->kn_data = ssb_space_prealloc(&so->so_snd);
	else
		kn->kn_data = ssb_space(&so->so_snd);

	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		if (so->so_state & SS_CANTRCVMORE)
			kn->kn_flags |= EV_HUP;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}
static int
filt_solisten(struct knote *kn, long hint __unused)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int qlen = so->so_qlen;

	if (soavailconn > 0 && qlen > soavailconn)
		qlen = soavailconn;
	kn->kn_data = qlen;

	return (!TAILQ_EMPTY(&so->so_comp));
}