socket: Simplify soclose_fast by always sending the close message
sys/kern/uipc_socket.c
1 /*
2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
4 *
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1988, 1990, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
62 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
63 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
66 #include "opt_inet.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/fcntl.h>
71 #include <sys/malloc.h>
72 #include <sys/mbuf.h>
73 #include <sys/domain.h>
74 #include <sys/file.h> /* for struct knote */
75 #include <sys/kernel.h>
76 #include <sys/event.h>
77 #include <sys/proc.h>
78 #include <sys/protosw.h>
79 #include <sys/socket.h>
80 #include <sys/socketvar.h>
81 #include <sys/socketops.h>
82 #include <sys/resourcevar.h>
83 #include <sys/signalvar.h>
84 #include <sys/sysctl.h>
85 #include <sys/uio.h>
86 #include <sys/jail.h>
87 #include <vm/vm_zone.h>
88 #include <vm/pmap.h>
89 #include <net/netmsg2.h>
90 #include <net/netisr2.h>
92 #include <sys/thread2.h>
93 #include <sys/socketvar2.h>
94 #include <sys/spinlock2.h>
96 #include <machine/limits.h>
98 #ifdef INET
99 extern int tcp_sosend_agglim;
100 extern int tcp_sosend_async;
101 extern int tcp_sosend_jcluster;
102 extern int udp_sosend_async;
103 extern int udp_sosend_prepend;
105 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
106 #endif /* INET */
108 static void filt_sordetach(struct knote *kn);
109 static int filt_soread(struct knote *kn, long hint);
110 static void filt_sowdetach(struct knote *kn);
111 static int filt_sowrite(struct knote *kn, long hint);
112 static int filt_solisten(struct knote *kn, long hint);
114 static int soclose_sync(struct socket *so, int fflag);
115 static void soclose_fast(struct socket *so);
117 static struct filterops solisten_filtops =
118 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
119 static struct filterops soread_filtops =
120 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
121 static struct filterops sowrite_filtops =
122 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
123 static struct filterops soexcept_filtops =
124 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
126 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
127 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
128 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
131 static int somaxconn = SOMAXCONN;
132 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
133 &somaxconn, 0, "Maximum pending socket connection queue size");
135 static int use_soclose_fast = 1;
136 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
137 &use_soclose_fast, 0, "Fast socket close");
139 int use_soaccept_pred_fast = 1;
140 SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
 141 &use_soaccept_pred_fast, 0, "Fast socket accept prediction");
143 int use_sendfile_async = 1;
144 SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
 145 &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");
147 int use_soconnect_async = 1;
148 SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
 149 &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");
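/*
 * Illustrative userland sketch (not part of this file): the tunables
 * above are exported under the kern.ipc sysctl tree, e.g.
 * kern.ipc.soclose_fast, and can be read or written at runtime with
 * sysctlbyname(3).
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fast;
 *		size_t len = sizeof(fast);
 *
 *		if (sysctlbyname("kern.ipc.soclose_fast", &fast, &len,
 *		    NULL, 0) == 0)
 *			printf("soclose_fast: %d\n", fast);
 *		return (0);
 *	}
 */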
152 * Socket operation routines.
153 * These routines are called by the routines in
154 * sys_socket.c or from a system process, and
155 * implement the semantics of socket operations by
156 * switching out to the protocol specific routines.
160 * Get a socket structure, and initialize it.
161 * Note that it would probably be better to allocate socket
162 * and PCB at the same time, but I'm not convinced that all
163 * the protocols can be easily modified to do this.
165 struct socket *
166 soalloc(int waitok, struct protosw *pr)
168 struct socket *so;
169 unsigned waitmask;
171 waitmask = waitok ? M_WAITOK : M_NOWAIT;
172 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
173 if (so) {
174 /* XXX race condition for reentrant kernel */
175 so->so_proto = pr;
176 TAILQ_INIT(&so->so_aiojobq);
177 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
178 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
179 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
180 lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
181 spin_init(&so->so_rcvd_spin, "soalloc");
182 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
183 MSGF_DROPABLE | MSGF_PRIORITY,
184 so->so_proto->pr_usrreqs->pru_rcvd);
185 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
186 so->so_state = SS_NOFDREF;
187 so->so_refs = 1;
189 return so;
193 socreate(int dom, struct socket **aso, int type,
194 int proto, struct thread *td)
196 struct proc *p = td->td_proc;
197 struct protosw *prp;
198 struct socket *so;
199 struct pru_attach_info ai;
200 int error;
202 if (proto)
203 prp = pffindproto(dom, proto, type);
204 else
205 prp = pffindtype(dom, type);
207 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
208 return (EPROTONOSUPPORT);
210 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
211 prp->pr_domain->dom_family != PF_LOCAL &&
212 prp->pr_domain->dom_family != PF_INET &&
213 prp->pr_domain->dom_family != PF_INET6 &&
214 prp->pr_domain->dom_family != PF_ROUTE) {
215 return (EPROTONOSUPPORT);
218 if (prp->pr_type != type)
219 return (EPROTOTYPE);
220 so = soalloc(p != NULL, prp);
221 if (so == NULL)
222 return (ENOBUFS);
225 * Callers of socreate() presumably will connect up a descriptor
226 * and call soclose() if they cannot. This represents our so_refs
227 * (which should be 1) from soalloc().
229 soclrstate(so, SS_NOFDREF);
232 * Set a default port for protocol processing. No action will occur
233 * on the socket on this port until an inpcb is attached to it and
234 * is able to match incoming packets, or until the socket becomes
235 * available to userland.
237 * We normally default the socket to the protocol thread on cpu 0,
 238 * if the protocol does not provide its own method to initialize the
239 * default port.
241 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
242 * thread and all pr_*()/pru_*() calls are executed synchronously.
244 if (prp->pr_flags & PR_SYNC_PORT)
245 so->so_port = &netisr_sync_port;
246 else if (prp->pr_initport != NULL)
247 so->so_port = prp->pr_initport();
248 else
249 so->so_port = netisr_cpuport(0);
251 TAILQ_INIT(&so->so_incomp);
252 TAILQ_INIT(&so->so_comp);
253 so->so_type = type;
254 so->so_cred = crhold(p->p_ucred);
255 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
256 ai.p_ucred = p->p_ucred;
257 ai.fd_rdir = p->p_fd->fd_rdir;
260 * Auto-sizing of socket buffers is managed by the protocols and
261 * the appropriate flags must be set in the pru_attach function.
263 error = so_pru_attach(so, proto, &ai);
264 if (error) {
265 sosetstate(so, SS_NOFDREF);
266 sofree(so); /* from soalloc */
267 return error;
271 * NOTE: Returns referenced socket.
273 *aso = so;
274 return (0);
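/*
 * Minimal in-kernel usage sketch (illustrative; the real consumers live
 * in kern/uipc_syscalls.c): socreate() returns a referenced socket that
 * the caller either wires up to a file descriptor or disposes of with
 * soclose().
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP, curthread);
 *	if (error == 0) {
 *		(use the socket, then drop the reference)
 *		soclose(so, 0);
 *	}
 */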
278 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
280 int error;
282 error = so_pru_bind(so, nam, td);
283 return (error);
286 static void
287 sodealloc(struct socket *so)
289 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
290 /* TODO: assert accept queues are empty, after unix socket is fixed */
292 if (so->so_rcv.ssb_hiwat)
293 (void)chgsbsize(so->so_cred->cr_uidinfo,
294 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
295 if (so->so_snd.ssb_hiwat)
296 (void)chgsbsize(so->so_cred->cr_uidinfo,
297 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
298 #ifdef INET
299 /* remove accept filter if present */
300 if (so->so_accf != NULL)
301 do_setopt_accept_filter(so, NULL);
302 #endif /* INET */
303 crfree(so->so_cred);
304 if (so->so_faddr != NULL)
305 kfree(so->so_faddr, M_SONAME);
306 kfree(so, M_SOCKET);
310 solisten(struct socket *so, int backlog, struct thread *td)
312 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
313 return (EINVAL);
315 lwkt_gettoken(&so->so_rcv.ssb_token);
316 if (TAILQ_EMPTY(&so->so_comp))
317 so->so_options |= SO_ACCEPTCONN;
318 lwkt_reltoken(&so->so_rcv.ssb_token);
319 if (backlog < 0 || backlog > somaxconn)
320 backlog = somaxconn;
321 so->so_qlimit = backlog;
322 return so_pru_listen(so, td);
325 static void
326 soqflush(struct socket *so)
328 lwkt_getpooltoken(so);
329 if (so->so_options & SO_ACCEPTCONN) {
330 struct socket *sp;
332 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
333 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
334 SS_INCOMP);
335 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
336 so->so_incqlen--;
337 soclrstate(sp, SS_INCOMP);
338 soabort_async(sp, TRUE);
340 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
341 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
342 SS_COMP);
343 TAILQ_REMOVE(&so->so_comp, sp, so_list);
344 so->so_qlen--;
345 soclrstate(sp, SS_COMP);
346 soabort_async(sp, TRUE);
349 lwkt_relpooltoken(so);
353 * Destroy a disconnected socket. This routine is a NOP if entities
354 * still have a reference on the socket:
356 * so_pcb - The protocol stack still has a reference
357 * SS_NOFDREF - There is no longer a file pointer reference
359 void
360 sofree(struct socket *so)
362 struct socket *head;
365 * This is a bit hackish at the moment. We need to interlock
366 * any accept queue we are on before we potentially lose the
367 * last reference to avoid races against a re-reference from
368 * someone operating on the queue.
370 while ((head = so->so_head) != NULL) {
371 lwkt_getpooltoken(head);
372 if (so->so_head == head)
373 break;
374 lwkt_relpooltoken(head);
 378 * Arbitrate the last free.
380 KKASSERT(so->so_refs > 0);
381 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
382 if (head)
383 lwkt_relpooltoken(head);
384 return;
387 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
388 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);
390 if (head != NULL) {
392 * We're done, remove ourselves from the accept queue we are
393 * on, if we are on one.
395 if (so->so_state & SS_INCOMP) {
396 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
397 SS_INCOMP);
398 TAILQ_REMOVE(&head->so_incomp, so, so_list);
399 head->so_incqlen--;
400 } else if (so->so_state & SS_COMP) {
402 * We must not decommission a socket that's
403 * on the accept(2) queue. If we do, then
404 * accept(2) may hang after select(2) indicated
405 * that the listening socket was ready.
407 KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) ==
408 SS_COMP);
409 lwkt_relpooltoken(head);
410 return;
411 } else {
412 panic("sofree: not queued");
414 soclrstate(so, SS_INCOMP);
415 so->so_head = NULL;
416 lwkt_relpooltoken(head);
417 } else {
418 /* Flush accept queues, if we are accepting. */
419 soqflush(so);
421 ssb_release(&so->so_snd, so);
422 sorflush(so);
423 sodealloc(so);
427 * Close a socket on last file table reference removal.
428 * Initiate disconnect if connected.
429 * Free socket when disconnect complete.
432 soclose(struct socket *so, int fflag)
434 int error;
436 funsetown(&so->so_sigio);
437 sosetstate(so, SS_ISCLOSING);
438 if (!use_soclose_fast ||
439 (so->so_proto->pr_flags & PR_SYNC_PORT) ||
440 ((so->so_state & SS_ISCONNECTED) &&
441 (so->so_options & SO_LINGER))) {
442 error = soclose_sync(so, fflag);
443 } else {
444 soclose_fast(so);
445 error = 0;
447 return error;
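/*
 * Userland consequence of the test above (a hedged sketch, not part of
 * this file): enabling SO_LINGER on a connected socket forces the
 * synchronous close path, so close(2) may sleep in soclose_sync() for
 * up to l_linger seconds while unsent data drains.
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	(may block for up to 5 seconds)
 */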
450 void
451 sodiscard(struct socket *so)
453 if (so->so_state & SS_NOFDREF)
454 panic("soclose: NOFDREF");
455 sosetstate(so, SS_NOFDREF); /* take ref */
 459 * Append the completed queue of head to head_inh (inheriting listen socket).
461 void
462 soinherit(struct socket *head, struct socket *head_inh)
464 boolean_t do_wakeup = FALSE;
466 KASSERT(head->so_options & SO_ACCEPTCONN,
467 ("head does not accept connection"));
468 KASSERT(head_inh->so_options & SO_ACCEPTCONN,
469 ("head_inh does not accept connection"));
471 lwkt_getpooltoken(head);
472 lwkt_getpooltoken(head_inh);
474 if (head->so_qlen > 0)
475 do_wakeup = TRUE;
477 while (!TAILQ_EMPTY(&head->so_comp)) {
478 struct ucred *old_cr;
479 struct socket *sp;
481 sp = TAILQ_FIRST(&head->so_comp);
482 KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
485 * Remove this socket from the current listen socket
486 * completed queue.
488 TAILQ_REMOVE(&head->so_comp, sp, so_list);
489 head->so_qlen--;
491 /* Save the old ucred for later free. */
492 old_cr = sp->so_cred;
495 * Install this socket to the inheriting listen socket
496 * completed queue.
498 sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
499 sp->so_head = head_inh;
501 TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
502 head_inh->so_qlen++;
505 * NOTE:
506 * crfree() may block and release the tokens temporarily.
507 * However, we are fine here, since the transition is done.
509 crfree(old_cr);
512 lwkt_relpooltoken(head_inh);
513 lwkt_relpooltoken(head);
515 if (do_wakeup) {
517 * "New" connections have arrived
519 sorwakeup(head_inh);
520 wakeup(&head_inh->so_timeo);
524 static int
525 soclose_sync(struct socket *so, int fflag)
527 int error = 0;
529 if ((so->so_proto->pr_flags & PR_SYNC_PORT) == 0)
530 so_pru_sync(so); /* unpend async prus */
532 if (so->so_pcb == NULL)
533 goto discard;
535 if (so->so_state & SS_ISCONNECTED) {
536 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
537 error = sodisconnect(so);
538 if (error)
539 goto drop;
541 if (so->so_options & SO_LINGER) {
542 if ((so->so_state & SS_ISDISCONNECTING) &&
543 (fflag & FNONBLOCK))
544 goto drop;
545 while (so->so_state & SS_ISCONNECTED) {
546 error = tsleep(&so->so_timeo, PCATCH,
547 "soclos", so->so_linger * hz);
548 if (error)
549 break;
553 drop:
554 if (so->so_pcb) {
555 int error2;
557 error2 = so_pru_detach(so);
558 if (error2 == EJUSTRETURN) {
560 * Protocol will call sodiscard()
561 * and sofree() for us.
563 return error;
565 if (error == 0)
566 error = error2;
568 discard:
569 sodiscard(so);
570 sofree(so); /* dispose of ref */
572 return (error);
575 static void
576 soclose_fast_handler(netmsg_t msg)
578 struct socket *so = msg->base.nm_so;
580 if (so->so_pcb == NULL)
581 goto discard;
583 if ((so->so_state & SS_ISCONNECTED) &&
584 (so->so_state & SS_ISDISCONNECTING) == 0)
585 so_pru_disconnect_direct(so);
587 if (so->so_pcb) {
588 int error;
590 error = so_pru_detach_direct(so);
591 if (error == EJUSTRETURN) {
593 * Protocol will call sodiscard()
594 * and sofree() for us.
596 return;
599 discard:
600 sodiscard(so);
601 sofree(so);
604 static void
605 soclose_fast(struct socket *so)
607 struct netmsg_base *base = &so->so_clomsg;
609 netmsg_init(base, so, &netisr_apanic_rport, 0,
610 soclose_fast_handler);
611 lwkt_sendmsg(so->so_port, &base->lmsg);
615 * Abort and destroy a socket. Only one abort can be in progress
616 * at any given moment.
618 void
619 soabort_async(struct socket *so, boolean_t clr_head)
622 * Keep a reference before clearing the so_head
623 * to avoid racing socket close in netisr.
625 soreference(so);
626 if (clr_head)
627 so->so_head = NULL;
628 so_pru_abort_async(so);
631 void
632 soabort_direct(struct socket *so)
634 soreference(so);
635 so_pru_abort_direct(so);
639 * so is passed in ref'd, which becomes owned by
640 * the cleared SS_NOFDREF flag.
642 void
643 soaccept_generic(struct socket *so)
645 if ((so->so_state & SS_NOFDREF) == 0)
646 panic("soaccept: !NOFDREF");
647 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */
651 soaccept(struct socket *so, struct sockaddr **nam)
653 int error;
655 soaccept_generic(so);
656 error = so_pru_accept(so, nam);
657 return (error);
661 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
662 boolean_t sync)
664 int error;
666 if (so->so_options & SO_ACCEPTCONN)
667 return (EOPNOTSUPP);
669 * If protocol is connection-based, can only connect once.
670 * Otherwise, if connected, try to disconnect first.
 671 * This allows the user to disconnect by connecting to, e.g.,
672 * a null address.
674 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
675 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
676 (error = sodisconnect(so)))) {
677 error = EISCONN;
678 } else {
680 * Prevent accumulated error from previous connection
681 * from biting us.
683 so->so_error = 0;
684 if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
685 error = so_pru_connect_async(so, nam, td);
686 else
687 error = so_pru_connect(so, nam, td);
689 return (error);
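/*
 * Example of the "connect to a null address" disconnect mentioned in the
 * comment above (illustrative userland sketch): a datagram socket can
 * dissolve its association by connecting to an address whose family is
 * AF_UNSPEC, which drives the sodisconnect() branch.
 *
 *	struct sockaddr sa;
 *
 *	bzero(&sa, sizeof(sa));
 *	sa.sa_family = AF_UNSPEC;
 *	connect(s, &sa, sizeof(sa));
 */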
693 soconnect2(struct socket *so1, struct socket *so2)
695 int error;
697 error = so_pru_connect2(so1, so2);
698 return (error);
702 sodisconnect(struct socket *so)
704 int error;
706 if ((so->so_state & SS_ISCONNECTED) == 0) {
707 error = ENOTCONN;
708 goto bad;
710 if (so->so_state & SS_ISDISCONNECTING) {
711 error = EALREADY;
712 goto bad;
714 error = so_pru_disconnect(so);
715 bad:
716 return (error);
719 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
721 * Send on a socket.
722 * If send must go all at once and message is larger than
723 * send buffering, then hard error.
724 * Lock against other senders.
725 * If must go all at once and not enough room now, then
726 * inform user that this would block and do nothing.
727 * Otherwise, if nonblocking, send as much as possible.
728 * The data to be sent is described by "uio" if nonzero,
729 * otherwise by the mbuf chain "top" (which must be null
730 * if uio is not). Data provided in mbuf chain must be small
731 * enough to send all at once.
733 * Returns nonzero on error, timeout or signal; callers
734 * must check for short counts if EINTR/ERESTART are returned.
735 * Data and control buffers are freed on return.
738 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
739 struct mbuf *top, struct mbuf *control, int flags,
740 struct thread *td)
742 struct mbuf **mp;
743 struct mbuf *m;
744 size_t resid;
745 int space, len;
746 int clen = 0, error, dontroute, mlen;
747 int atomic = sosendallatonce(so) || top;
748 int pru_flags;
750 if (uio) {
751 resid = uio->uio_resid;
752 } else {
753 resid = (size_t)top->m_pkthdr.len;
754 #ifdef INVARIANTS
755 len = 0;
756 for (m = top; m; m = m->m_next)
757 len += m->m_len;
758 KKASSERT(top->m_pkthdr.len == len);
759 #endif
763 * WARNING! resid is unsigned, space and len are signed. space
764 * can wind up negative if the sockbuf is overcommitted.
766 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
767 * type sockets since that's an error.
769 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
770 error = EINVAL;
771 goto out;
774 dontroute =
775 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
776 (so->so_proto->pr_flags & PR_ATOMIC);
777 if (td->td_lwp != NULL)
778 td->td_lwp->lwp_ru.ru_msgsnd++;
779 if (control)
780 clen = control->m_len;
781 #define gotoerr(errcode) { error = errcode; goto release; }
783 restart:
784 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
785 if (error)
786 goto out;
788 do {
789 if (so->so_state & SS_CANTSENDMORE)
790 gotoerr(EPIPE);
791 if (so->so_error) {
792 error = so->so_error;
793 so->so_error = 0;
794 goto release;
796 if ((so->so_state & SS_ISCONNECTED) == 0) {
 798 * `sendto' and `sendmsg' are allowed on a connection-
799 * based socket if it supports implied connect.
800 * Return ENOTCONN if not connected and no address is
801 * supplied.
803 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
804 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
805 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
806 !(resid == 0 && clen != 0))
807 gotoerr(ENOTCONN);
808 } else if (addr == NULL)
809 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
810 ENOTCONN : EDESTADDRREQ);
812 if ((atomic && resid > so->so_snd.ssb_hiwat) ||
813 clen > so->so_snd.ssb_hiwat) {
814 gotoerr(EMSGSIZE);
816 space = ssb_space(&so->so_snd);
817 if (flags & MSG_OOB)
818 space += 1024;
819 if ((space < 0 || (size_t)space < resid + clen) && uio &&
820 (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
821 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
822 gotoerr(EWOULDBLOCK);
823 ssb_unlock(&so->so_snd);
824 error = ssb_wait(&so->so_snd);
825 if (error)
826 goto out;
827 goto restart;
829 mp = &top;
830 space -= clen;
831 do {
832 if (uio == NULL) {
834 * Data is prepackaged in "top".
836 resid = 0;
837 if (flags & MSG_EOR)
838 top->m_flags |= M_EOR;
839 } else do {
840 if (resid > INT_MAX)
841 resid = INT_MAX;
842 m = m_getl((int)resid, M_WAITOK, MT_DATA,
843 top == NULL ? M_PKTHDR : 0, &mlen);
844 if (top == NULL) {
845 m->m_pkthdr.len = 0;
846 m->m_pkthdr.rcvif = NULL;
848 len = imin((int)szmin(mlen, resid), space);
849 if (resid < MINCLSIZE) {
851 * For datagram protocols, leave room
852 * for protocol headers in first mbuf.
854 if (atomic && top == NULL && len < mlen)
855 MH_ALIGN(m, len);
857 space -= len;
858 error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
859 resid = uio->uio_resid;
860 m->m_len = len;
861 *mp = m;
862 top->m_pkthdr.len += len;
863 if (error)
864 goto release;
865 mp = &m->m_next;
866 if (resid == 0) {
867 if (flags & MSG_EOR)
868 top->m_flags |= M_EOR;
869 break;
871 } while (space > 0 && atomic);
872 if (dontroute)
873 so->so_options |= SO_DONTROUTE;
874 if (flags & MSG_OOB) {
875 pru_flags = PRUS_OOB;
876 } else if ((flags & MSG_EOF) &&
877 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
878 (resid == 0)) {
880 * If the user set MSG_EOF, the protocol
881 * understands this flag and nothing left to
882 * send then use PRU_SEND_EOF instead of PRU_SEND.
884 pru_flags = PRUS_EOF;
885 } else if (resid > 0 && space > 0) {
886 /* If there is more to send, set PRUS_MORETOCOME */
887 pru_flags = PRUS_MORETOCOME;
888 } else {
889 pru_flags = 0;
892 * XXX all the SS_CANTSENDMORE checks previously
 893 * done could be out of date. We could have received
894 * a reset packet in an interrupt or maybe we slept
895 * while doing page faults in uiomove() etc. We could
896 * probably recheck again inside the splnet() protection
897 * here, but there are probably other places that this
898 * also happens. We must rethink this.
900 error = so_pru_send(so, pru_flags, top, addr, control, td);
901 if (dontroute)
902 so->so_options &= ~SO_DONTROUTE;
903 clen = 0;
904 control = NULL;
905 top = NULL;
906 mp = &top;
907 if (error)
908 goto release;
909 } while (resid && space > 0);
910 } while (resid);
912 release:
913 ssb_unlock(&so->so_snd);
914 out:
915 if (top)
916 m_freem(top);
917 if (control)
918 m_freem(control);
919 return (error);
922 #ifdef INET
 924 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 925 *   so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
 926 *     sosendallatonce() returns true,
 927 *     the "atomic" variable is true,
 928 *     and sosendudp() blocks until space is available for the entire send.
 929 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 930 *     PR_IMPLOPCL flags set.
 931 *   UDP has no out-of-band data.
 932 *   UDP has no control data.
 933 *   UDP does not support MSG_EOR.
936 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
937 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
939 size_t resid;
940 int error, pru_flags = 0;
941 int space;
943 if (td->td_lwp != NULL)
944 td->td_lwp->lwp_ru.ru_msgsnd++;
945 if (control)
946 m_freem(control);
948 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
949 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;
951 restart:
952 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
953 if (error)
954 goto out;
956 if (so->so_state & SS_CANTSENDMORE)
957 gotoerr(EPIPE);
958 if (so->so_error) {
959 error = so->so_error;
960 so->so_error = 0;
961 goto release;
963 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
964 gotoerr(EDESTADDRREQ);
965 if (resid > so->so_snd.ssb_hiwat)
966 gotoerr(EMSGSIZE);
967 space = ssb_space(&so->so_snd);
968 if (uio && (space < 0 || (size_t)space < resid)) {
969 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
970 gotoerr(EWOULDBLOCK);
971 ssb_unlock(&so->so_snd);
972 error = ssb_wait(&so->so_snd);
973 if (error)
974 goto out;
975 goto restart;
978 if (uio) {
979 int hdrlen = max_hdr;
982 * We try to optimize out the additional mbuf
983 * allocations in M_PREPEND() on output path, e.g.
984 * - udp_output(), when it tries to prepend protocol
985 * headers.
986 * - Link layer output function, when it tries to
987 * prepend link layer header.
989 * This probably will not benefit any data that will
990 * be fragmented, so this optimization is only performed
991 * when the size of data and max size of protocol+link
992 * headers fit into one mbuf cluster.
994 if (uio->uio_resid > MCLBYTES - hdrlen ||
995 !udp_sosend_prepend) {
996 top = m_uiomove(uio);
997 if (top == NULL)
998 goto release;
999 } else {
1000 int nsize;
1002 top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
1003 MT_DATA, M_PKTHDR, &nsize);
1004 KASSERT(nsize >= uio->uio_resid + hdrlen,
1005 ("sosendudp invalid nsize %d, "
1006 "resid %zu, hdrlen %d",
1007 nsize, uio->uio_resid, hdrlen));
1009 top->m_len = uio->uio_resid;
1010 top->m_pkthdr.len = uio->uio_resid;
1011 top->m_data += hdrlen;
1013 error = uiomove(mtod(top, caddr_t), top->m_len, uio);
1014 if (error)
1015 goto out;
1019 if (flags & MSG_DONTROUTE)
1020 pru_flags |= PRUS_DONTROUTE;
1022 if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
1023 so_pru_send_async(so, pru_flags, top, addr, NULL, td);
1024 error = 0;
1025 } else {
1026 error = so_pru_send(so, pru_flags, top, addr, NULL, td);
1028 top = NULL; /* sent or freed in lower layer */
1030 release:
1031 ssb_unlock(&so->so_snd);
1032 out:
1033 if (top)
1034 m_freem(top);
1035 return (error);
1039 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
1040 struct mbuf *top, struct mbuf *control, int flags,
1041 struct thread *td)
1043 struct mbuf **mp;
1044 struct mbuf *m;
1045 size_t resid;
1046 int space, len;
1047 int error, mlen;
1048 int allatonce;
1049 int pru_flags;
1051 if (uio) {
1052 KKASSERT(top == NULL);
1053 allatonce = 0;
1054 resid = uio->uio_resid;
1055 } else {
1056 allatonce = 1;
1057 resid = (size_t)top->m_pkthdr.len;
1058 #ifdef INVARIANTS
1059 len = 0;
1060 for (m = top; m; m = m->m_next)
1061 len += m->m_len;
1062 KKASSERT(top->m_pkthdr.len == len);
1063 #endif
1067 * WARNING! resid is unsigned, space and len are signed. space
1068 * can wind up negative if the sockbuf is overcommitted.
1070 * Also check to make sure that MSG_EOR isn't used on TCP
1072 if (flags & MSG_EOR) {
1073 error = EINVAL;
1074 goto out;
1077 if (control) {
1078 /* TCP doesn't do control messages (rights, creds, etc) */
1079 if (control->m_len) {
1080 error = EINVAL;
1081 goto out;
1083 m_freem(control); /* empty control, just free it */
1084 control = NULL;
1087 if (td->td_lwp != NULL)
1088 td->td_lwp->lwp_ru.ru_msgsnd++;
1090 #define gotoerr(errcode) { error = errcode; goto release; }
1092 restart:
1093 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
1094 if (error)
1095 goto out;
1097 do {
1098 if (so->so_state & SS_CANTSENDMORE)
1099 gotoerr(EPIPE);
1100 if (so->so_error) {
1101 error = so->so_error;
1102 so->so_error = 0;
1103 goto release;
1105 if ((so->so_state & SS_ISCONNECTED) == 0 &&
1106 (so->so_state & SS_ISCONFIRMING) == 0)
1107 gotoerr(ENOTCONN);
1108 if (allatonce && resid > so->so_snd.ssb_hiwat)
1109 gotoerr(EMSGSIZE);
1111 space = ssb_space_prealloc(&so->so_snd);
1112 if (flags & MSG_OOB)
1113 space += 1024;
1114 if ((space < 0 || (size_t)space < resid) && !allatonce &&
1115 space < so->so_snd.ssb_lowat) {
1116 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
1117 gotoerr(EWOULDBLOCK);
1118 ssb_unlock(&so->so_snd);
1119 error = ssb_wait(&so->so_snd);
1120 if (error)
1121 goto out;
1122 goto restart;
1124 mp = &top;
1125 do {
1126 int cnt = 0, async = 0;
1128 if (uio == NULL) {
1130 * Data is prepackaged in "top".
1132 resid = 0;
1133 } else do {
1134 if (resid > INT_MAX)
1135 resid = INT_MAX;
1136 if (tcp_sosend_jcluster) {
1137 m = m_getlj((int)resid, M_WAITOK, MT_DATA,
1138 top == NULL ? M_PKTHDR : 0, &mlen);
1139 } else {
1140 m = m_getl((int)resid, M_WAITOK, MT_DATA,
1141 top == NULL ? M_PKTHDR : 0, &mlen);
1143 if (top == NULL) {
1144 m->m_pkthdr.len = 0;
1145 m->m_pkthdr.rcvif = NULL;
1147 len = imin((int)szmin(mlen, resid), space);
1148 space -= len;
1149 error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
1150 resid = uio->uio_resid;
1151 m->m_len = len;
1152 *mp = m;
1153 top->m_pkthdr.len += len;
1154 if (error)
1155 goto release;
1156 mp = &m->m_next;
1157 if (resid == 0)
1158 break;
1159 ++cnt;
1160 } while (space > 0 && cnt < tcp_sosend_agglim);
1162 if (tcp_sosend_async)
1163 async = 1;
1165 if (flags & MSG_OOB) {
1166 pru_flags = PRUS_OOB;
1167 async = 0;
1168 } else if ((flags & MSG_EOF) && resid == 0) {
1169 pru_flags = PRUS_EOF;
1170 } else if (resid > 0 && space > 0) {
1171 /* If there is more to send, set PRUS_MORETOCOME */
1172 pru_flags = PRUS_MORETOCOME;
1173 async = 1;
1174 } else {
1175 pru_flags = 0;
1178 if (flags & MSG_SYNC)
1179 async = 0;
1182 * XXX all the SS_CANTSENDMORE checks previously
 1183 * done could be out of date. We could have received
1184 * a reset packet in an interrupt or maybe we slept
1185 * while doing page faults in uiomove() etc. We could
1186 * probably recheck again inside the splnet() protection
1187 * here, but there are probably other places that this
1188 * also happens. We must rethink this.
1190 for (m = top; m; m = m->m_next)
1191 ssb_preallocstream(&so->so_snd, m);
1192 if (!async) {
1193 error = so_pru_send(so, pru_flags, top,
1194 NULL, NULL, td);
1195 } else {
1196 so_pru_send_async(so, pru_flags, top,
1197 NULL, NULL, td);
1198 error = 0;
1201 top = NULL;
1202 mp = &top;
1203 if (error)
1204 goto release;
1205 } while (resid && space > 0);
1206 } while (resid);
1208 release:
1209 ssb_unlock(&so->so_snd);
1210 out:
1211 if (top)
1212 m_freem(top);
1213 if (control)
1214 m_freem(control);
1215 return (error);
1217 #endif
1220 * Implement receive operations on a socket.
1222 * We depend on the way that records are added to the signalsockbuf
1223 * by sbappend*. In particular, each record (mbufs linked through m_next)
1224 * must begin with an address if the protocol so specifies,
1225 * followed by an optional mbuf or mbufs containing ancillary data,
1226 * and then zero or more mbufs of data.
1228 * Although the signalsockbuf is locked, new data may still be appended.
1229 * A token inside the ssb_lock deals with MP issues and still allows
1230 * the network to access the socket if we block in a uio.
1232 * The caller may receive the data as a single mbuf chain by supplying
1233 * an mbuf **mp0 for use in returning the chain. The uio is then used
1234 * only for the count in uio_resid.
1237 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
1238 struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
1240 struct mbuf *m, *n;
1241 struct mbuf *free_chain = NULL;
1242 int flags, len, error, offset;
1243 struct protosw *pr = so->so_proto;
1244 int moff, type = 0;
1245 size_t resid, orig_resid;
1247 if (uio)
1248 resid = uio->uio_resid;
1249 else
1250 resid = (size_t)(sio->sb_climit - sio->sb_cc);
1251 orig_resid = resid;
1253 if (psa)
1254 *psa = NULL;
1255 if (controlp)
1256 *controlp = NULL;
1257 if (flagsp)
1258 flags = *flagsp &~ MSG_EOR;
1259 else
1260 flags = 0;
1261 if (flags & MSG_OOB) {
1262 m = m_get(M_WAITOK, MT_DATA);
1263 if (m == NULL)
1264 return (ENOBUFS);
1265 error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
1266 if (error)
1267 goto bad;
1268 if (sio) {
1269 do {
1270 sbappend(sio, m);
1271 KKASSERT(resid >= (size_t)m->m_len);
1272 resid -= (size_t)m->m_len;
1273 } while (resid > 0 && m);
1274 } else {
1275 do {
1276 uio->uio_resid = resid;
1277 error = uiomove(mtod(m, caddr_t),
1278 (int)szmin(resid, m->m_len),
1279 uio);
1280 resid = uio->uio_resid;
1281 m = m_free(m);
1282 } while (uio->uio_resid && error == 0 && m);
1284 bad:
1285 if (m)
1286 m_freem(m);
1287 return (error);
1289 if ((so->so_state & SS_ISCONFIRMING) && resid)
1290 so_pru_rcvd(so, 0);
1293 * The token interlocks against the protocol thread while
1294 * ssb_lock is a blocking lock against other userland entities.
1296 lwkt_gettoken(&so->so_rcv.ssb_token);
1297 restart:
1298 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
1299 if (error)
1300 goto done;
1302 m = so->so_rcv.ssb_mb;
1304 * If we have less data than requested, block awaiting more
1305 * (subject to any timeout) if:
1306 * 1. the current count is less than the low water mark, or
1307 * 2. MSG_WAITALL is set, and it is possible to do the entire
1308 * receive operation at once if we block (resid <= hiwat).
1309 * 3. MSG_DONTWAIT is not set
1310 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1311 * we have to do the receive in sections, and thus risk returning
1312 * a short count if a timeout or signal occurs after we start.
1314 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1315 (size_t)so->so_rcv.ssb_cc < resid) &&
1316 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
1317 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
1318 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
1319 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
1320 if (so->so_error) {
1321 if (m)
1322 goto dontblock;
1323 error = so->so_error;
1324 if ((flags & MSG_PEEK) == 0)
1325 so->so_error = 0;
1326 goto release;
1328 if (so->so_state & SS_CANTRCVMORE) {
1329 if (m)
1330 goto dontblock;
1331 else
1332 goto release;
1334 for (; m; m = m->m_next) {
1335 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1336 m = so->so_rcv.ssb_mb;
1337 goto dontblock;
1340 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1341 (pr->pr_flags & PR_CONNREQUIRED)) {
1342 error = ENOTCONN;
1343 goto release;
1345 if (resid == 0)
1346 goto release;
1347 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
1348 error = EWOULDBLOCK;
1349 goto release;
1351 ssb_unlock(&so->so_rcv);
1352 error = ssb_wait(&so->so_rcv);
1353 if (error)
1354 goto done;
1355 goto restart;
1357 dontblock:
1358 if (uio && uio->uio_td && uio->uio_td->td_proc)
1359 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
1362 * note: m should be == sb_mb here. Cache the next record while
 1363 * cleaning up. Note that calling m_free*() will break out of the
 1364 * critical section.
1366 KKASSERT(m == so->so_rcv.ssb_mb);
1369 * Skip any address mbufs prepending the record.
1371 if (pr->pr_flags & PR_ADDR) {
1372 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
1373 orig_resid = 0;
1374 if (psa)
1375 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
1376 if (flags & MSG_PEEK)
1377 m = m->m_next;
1378 else
1379 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1383 * Skip any control mbufs prepending the record.
1385 while (m && m->m_type == MT_CONTROL && error == 0) {
1386 if (flags & MSG_PEEK) {
1387 if (controlp)
1388 *controlp = m_copy(m, 0, m->m_len);
1389 m = m->m_next; /* XXX race */
1390 } else {
1391 if (controlp) {
1392 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1393 if (pr->pr_domain->dom_externalize &&
1394 mtod(m, struct cmsghdr *)->cmsg_type ==
1395 SCM_RIGHTS)
1396 error = (*pr->pr_domain->dom_externalize)(m);
1397 *controlp = m;
1398 m = n;
1399 } else {
1400 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1403 if (controlp && *controlp) {
1404 orig_resid = 0;
1405 controlp = &(*controlp)->m_next;
1410 * flag OOB data.
1412 if (m) {
1413 type = m->m_type;
1414 if (type == MT_OOBDATA)
1415 flags |= MSG_OOB;
1419 * Copy to the UIO or mbuf return chain (*mp).
1421 moff = 0;
1422 offset = 0;
1423 while (m && resid > 0 && error == 0) {
1424 if (m->m_type == MT_OOBDATA) {
1425 if (type != MT_OOBDATA)
1426 break;
1427 } else if (type == MT_OOBDATA)
1428 break;
1429 else
1430 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1431 ("receive 3"));
1432 soclrstate(so, SS_RCVATMARK);
1433 len = (resid > INT_MAX) ? INT_MAX : resid;
1434 if (so->so_oobmark && len > so->so_oobmark - offset)
1435 len = so->so_oobmark - offset;
1436 if (len > m->m_len - moff)
1437 len = m->m_len - moff;
1440 * Copy out to the UIO or pass the mbufs back to the SIO.
1441 * The SIO is dealt with when we eat the mbuf, but deal
1442 * with the resid here either way.
1444 if (uio) {
1445 uio->uio_resid = resid;
1446 error = uiomove(mtod(m, caddr_t) + moff, len, uio);
1447 resid = uio->uio_resid;
1448 if (error)
1449 goto release;
1450 } else {
1451 resid -= (size_t)len;
1455 * Eat the entire mbuf or just a piece of it
1457 if (len == m->m_len - moff) {
1458 if (m->m_flags & M_EOR)
1459 flags |= MSG_EOR;
1460 if (flags & MSG_PEEK) {
1461 m = m->m_next;
1462 moff = 0;
1463 } else {
1464 if (sio) {
1465 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1466 sbappend(sio, m);
1467 m = n;
1468 } else {
1469 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
1472 } else {
1473 if (flags & MSG_PEEK) {
1474 moff += len;
1475 } else {
1476 if (sio) {
1477 n = m_copym(m, 0, len, M_WAITOK);
1478 if (n)
1479 sbappend(sio, n);
1481 m->m_data += len;
1482 m->m_len -= len;
1483 so->so_rcv.ssb_cc -= len;
1486 if (so->so_oobmark) {
1487 if ((flags & MSG_PEEK) == 0) {
1488 so->so_oobmark -= len;
1489 if (so->so_oobmark == 0) {
1490 sosetstate(so, SS_RCVATMARK);
1491 break;
1493 } else {
1494 offset += len;
1495 if (offset == so->so_oobmark)
1496 break;
1499 if (flags & MSG_EOR)
1500 break;
1502 * If the MSG_WAITALL flag is set (for non-atomic socket),
1503 * we must not quit until resid == 0 or an error
1504 * termination. If a signal/timeout occurs, return
1505 * with a short count but without error.
1506 * Keep signalsockbuf locked against other readers.
1508 while ((flags & MSG_WAITALL) && m == NULL &&
1509 resid > 0 && !sosendallatonce(so) &&
1510 so->so_rcv.ssb_mb == NULL) {
1511 if (so->so_error || so->so_state & SS_CANTRCVMORE)
1512 break;
1514 * The window might have closed to zero, make
1515 * sure we send an ack now that we've drained
1516 * the buffer or we might end up blocking until
1517 * the idle takes over (5 seconds).
1519 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1520 so_pru_rcvd(so, flags);
1521 error = ssb_wait(&so->so_rcv);
1522 if (error) {
1523 ssb_unlock(&so->so_rcv);
1524 error = 0;
1525 goto done;
1527 m = so->so_rcv.ssb_mb;
1532 * If an atomic read was requested but unread data still remains
1533 * in the record, set MSG_TRUNC.
1535 if (m && pr->pr_flags & PR_ATOMIC)
1536 flags |= MSG_TRUNC;
1539 * Cleanup. If an atomic read was requested drop any unread data.
1541 if ((flags & MSG_PEEK) == 0) {
1542 if (m && (pr->pr_flags & PR_ATOMIC))
1543 sbdroprecord(&so->so_rcv.sb);
1544 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
1545 so_pru_rcvd(so, flags);
1548 if (orig_resid == resid && orig_resid &&
1549 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1550 ssb_unlock(&so->so_rcv);
1551 goto restart;
1554 if (flagsp)
1555 *flagsp |= flags;
1556 release:
1557 ssb_unlock(&so->so_rcv);
1558 done:
1559 lwkt_reltoken(&so->so_rcv.ssb_token);
1560 if (free_chain)
1561 m_freem(free_chain);
1562 return (error);
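/*
 * Userland view of the MSG_WAITALL handling above (an illustrative
 * sketch, not part of this file): soreceive() keeps looping with the
 * signalsockbuf locked, so a MSG_WAITALL read only returns short on
 * error, EOF or a signal/timeout.
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	n = recv(s, buf, sizeof(buf), MSG_WAITALL);
 *	if (n > 0 && (size_t)n < sizeof(buf))
 *		(EOF, signal or timeout cut the read short)
 */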
1566 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
1567 struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
1569 struct mbuf *m, *n;
1570 struct mbuf *free_chain = NULL;
1571 int flags, len, error, offset;
1572 struct protosw *pr = so->so_proto;
1573 int moff;
1574 int didoob;
1575 size_t resid, orig_resid, restmp;
1577 if (uio)
1578 resid = uio->uio_resid;
1579 else
1580 resid = (size_t)(sio->sb_climit - sio->sb_cc);
1581 orig_resid = resid;
1583 if (psa)
1584 *psa = NULL;
1585 if (controlp)
1586 *controlp = NULL;
1587 if (flagsp)
1588 flags = *flagsp &~ MSG_EOR;
1589 else
1590 flags = 0;
1591 if (flags & MSG_OOB) {
1592 m = m_get(M_WAITOK, MT_DATA);
1593 if (m == NULL)
1594 return (ENOBUFS);
1595 error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
1596 if (error)
1597 goto bad;
1598 if (sio) {
1599 do {
1600 sbappend(sio, m);
1601 KKASSERT(resid >= (size_t)m->m_len);
1602 resid -= (size_t)m->m_len;
1603 } while (resid > 0 && m);
1604 } else {
1605 do {
1606 uio->uio_resid = resid;
1607 error = uiomove(mtod(m, caddr_t),
1608 (int)szmin(resid, m->m_len),
1609 uio);
1610 resid = uio->uio_resid;
1611 m = m_free(m);
1612 } while (uio->uio_resid && error == 0 && m);
1614 bad:
1615 if (m)
1616 m_freem(m);
1617 return (error);
1621 * The token interlocks against the protocol thread while
1622 * ssb_lock is a blocking lock against other userland entities.
1624 * Lock a limited number of mbufs (not all, so sbcompress() still
1625 * works well). The token is used as an interlock for sbwait() so
 1626 * release it afterwards.
1628 restart:
1629 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
1630 if (error)
1631 goto done;
1633 lwkt_gettoken(&so->so_rcv.ssb_token);
1634 m = so->so_rcv.ssb_mb;
1637 * If we have less data than requested, block awaiting more
1638 * (subject to any timeout) if:
1639 * 1. the current count is less than the low water mark, or
1640 * 2. MSG_WAITALL is set, and it is possible to do the entire
1641 * receive operation at once if we block (resid <= hiwat).
1642 * 3. MSG_DONTWAIT is not set
1643 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1644 * we have to do the receive in sections, and thus risk returning
1645 * a short count if a timeout or signal occurs after we start.
1647 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1648 (size_t)so->so_rcv.ssb_cc < resid) &&
1649 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
1650 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
1651 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
1652 if (so->so_error) {
1653 if (m)
1654 goto dontblock;
1655 lwkt_reltoken(&so->so_rcv.ssb_token);
1656 error = so->so_error;
1657 if ((flags & MSG_PEEK) == 0)
1658 so->so_error = 0;
1659 goto release;
1661 if (so->so_state & SS_CANTRCVMORE) {
1662 if (m)
1663 goto dontblock;
1664 lwkt_reltoken(&so->so_rcv.ssb_token);
1665 goto release;
1667 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1668 (pr->pr_flags & PR_CONNREQUIRED)) {
1669 lwkt_reltoken(&so->so_rcv.ssb_token);
1670 error = ENOTCONN;
1671 goto release;
1673 if (resid == 0) {
1674 lwkt_reltoken(&so->so_rcv.ssb_token);
1675 goto release;
1677 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
1678 lwkt_reltoken(&so->so_rcv.ssb_token);
1679 error = EWOULDBLOCK;
1680 goto release;
1682 ssb_unlock(&so->so_rcv);
1683 error = ssb_wait(&so->so_rcv);
1684 lwkt_reltoken(&so->so_rcv.ssb_token);
1685 if (error)
1686 goto done;
1687 goto restart;
1691 * Token still held
1693 dontblock:
1694 n = m;
1695 restmp = 0;
1696 while (n && restmp < resid) {
1697 n->m_flags |= M_SOLOCKED;
1698 restmp += n->m_len;
1699 if (n->m_next == NULL)
1700 n = n->m_nextpkt;
1701 else
1702 n = n->m_next;
1706 * Release token for loop
1708 lwkt_reltoken(&so->so_rcv.ssb_token);
1709 if (uio && uio->uio_td && uio->uio_td->td_proc)
1710 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;
1713 * note: m should be == sb_mb here. Cache the next record while
 1714 * cleaning up. Note that calling m_free*() will break out of the
 1715 * critical section.
1717 KKASSERT(m == so->so_rcv.ssb_mb);
1720 * Copy to the UIO or mbuf return chain (*mp).
1722 * NOTE: Token is not held for loop
1724 moff = 0;
1725 offset = 0;
1726 didoob = 0;
1728 while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
1729 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1730 ("receive 3"));
1732 soclrstate(so, SS_RCVATMARK);
1733 len = (resid > INT_MAX) ? INT_MAX : resid;
1734 if (so->so_oobmark && len > so->so_oobmark - offset)
1735 len = so->so_oobmark - offset;
1736 if (len > m->m_len - moff)
1737 len = m->m_len - moff;
1740 * Copy out to the UIO or pass the mbufs back to the SIO.
1741 * The SIO is dealt with when we eat the mbuf, but deal
1742 * with the resid here either way.
1744 if (uio) {
1745 uio->uio_resid = resid;
1746 error = uiomove(mtod(m, caddr_t) + moff, len, uio);
1747 resid = uio->uio_resid;
1748 if (error)
1749 goto release;
1750 } else {
1751 resid -= (size_t)len;
1755 * Eat the entire mbuf or just a piece of it
1757 offset += len;
1758 if (len == m->m_len - moff) {
1759 m = m->m_next;
1760 moff = 0;
1761 } else {
1762 moff += len;
1766 * Check oobmark
1768 if (so->so_oobmark && offset == so->so_oobmark) {
1769 didoob = 1;
1770 break;
1775 * Synchronize sockbuf with data we read.
1777 * NOTE: (m) is junk on entry (it could be left over from the
1778 * previous loop).
1780 if ((flags & MSG_PEEK) == 0) {
1781 lwkt_gettoken(&so->so_rcv.ssb_token);
1782 m = so->so_rcv.ssb_mb;
1783 while (m && offset >= m->m_len) {
1784 if (so->so_oobmark) {
1785 so->so_oobmark -= m->m_len;
1786 if (so->so_oobmark == 0) {
1787 sosetstate(so, SS_RCVATMARK);
1788 didoob = 1;
1791 offset -= m->m_len;
1792 if (sio) {
1793 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
1794 sbappend(sio, m);
1795 m = n;
1796 } else {
1797 m = sbunlinkmbuf(&so->so_rcv.sb,
1798 m, &free_chain);
1801 if (offset) {
1802 KKASSERT(m);
1803 if (sio) {
1804 n = m_copym(m, 0, offset, M_WAITOK);
1805 if (n)
1806 sbappend(sio, n);
1808 m->m_data += offset;
1809 m->m_len -= offset;
1810 so->so_rcv.ssb_cc -= offset;
1811 if (so->so_oobmark) {
1812 so->so_oobmark -= offset;
1813 if (so->so_oobmark == 0) {
1814 sosetstate(so, SS_RCVATMARK);
1815 didoob = 1;
1818 offset = 0;
1820 lwkt_reltoken(&so->so_rcv.ssb_token);
1824 * If the MSG_WAITALL flag is set (for non-atomic socket),
1825 * we must not quit until resid == 0 or an error termination.
1827 * If a signal/timeout occurs, return with a short count but without
1828 * error.
1830 * Keep signalsockbuf locked against other readers.
1832 * XXX if MSG_PEEK we currently do quit.
1834 if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
1835 didoob == 0 && resid > 0 &&
1836 !sosendallatonce(so)) {
1837 lwkt_gettoken(&so->so_rcv.ssb_token);
1838 error = 0;
1839 while ((m = so->so_rcv.ssb_mb) == NULL) {
1840 if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
1841 error = so->so_error;
1842 break;
1845 * The window might have closed to zero, make
1846 * sure we send an ack now that we've drained
1847 * the buffer or we might end up blocking until
1848 * the idle takes over (5 seconds).
1850 if (so->so_pcb)
1851 so_pru_rcvd_async(so);
1852 if (so->so_rcv.ssb_mb == NULL)
1853 error = ssb_wait(&so->so_rcv);
1854 if (error) {
1855 lwkt_reltoken(&so->so_rcv.ssb_token);
1856 ssb_unlock(&so->so_rcv);
1857 error = 0;
1858 goto done;
1861 if (m && error == 0)
1862 goto dontblock;
1863 lwkt_reltoken(&so->so_rcv.ssb_token);
1867 * Token not held here.
1869 * Cleanup. If an atomic read was requested drop any unread data XXX
1871 if ((flags & MSG_PEEK) == 0) {
1872 if (so->so_pcb)
1873 so_pru_rcvd_async(so);
1876 if (orig_resid == resid && orig_resid &&
1877 (so->so_state & SS_CANTRCVMORE) == 0) {
1878 ssb_unlock(&so->so_rcv);
1879 goto restart;
1882 if (flagsp)
1883 *flagsp |= flags;
1884 release:
1885 ssb_unlock(&so->so_rcv);
1886 done:
1887 if (free_chain)
1888 m_freem(free_chain);
1889 return (error);
1893 * Shut a socket down. Note that we do not get a frontend lock as we
1894 * want to be able to shut the socket down even if another thread is
1895 * blocked in a read(), thus waking it up.
1898 soshutdown(struct socket *so, int how)
1900 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1901 return (EINVAL);
1903 if (how != SHUT_WR) {
1904 /*ssb_lock(&so->so_rcv, M_WAITOK);*/
1905 sorflush(so);
1906 /*ssb_unlock(&so->so_rcv);*/
1908 if (how != SHUT_RD)
1909 return (so_pru_shutdown(so));
1910 return (0);
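/*
 * Summary of the how argument handled above:
 *
 *	shutdown(s, SHUT_RD)	- sorflush() only; wakes up blocked readers
 *	shutdown(s, SHUT_WR)	- so_pru_shutdown() only
 *	shutdown(s, SHUT_RDWR)	- both of the above
 */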
1913 void
1914 sorflush(struct socket *so)
1916 struct signalsockbuf *ssb = &so->so_rcv;
1917 struct protosw *pr = so->so_proto;
1918 struct signalsockbuf asb;
1920 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);
1922 lwkt_gettoken(&ssb->ssb_token);
1923 socantrcvmore(so);
1924 asb = *ssb;
1927 * Can't just blow up the ssb structure here
1929 bzero(&ssb->sb, sizeof(ssb->sb));
1930 ssb->ssb_timeo = 0;
1931 ssb->ssb_lowat = 0;
1932 ssb->ssb_hiwat = 0;
1933 ssb->ssb_mbmax = 0;
1934 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);
1936 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
1937 (*pr->pr_domain->dom_dispose)(asb.ssb_mb);
1938 ssb_release(&asb, so);
1940 lwkt_reltoken(&ssb->ssb_token);
1943 #ifdef INET
1944 static int
1945 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
1947 struct accept_filter_arg *afap = NULL;
1948 struct accept_filter *afp;
1949 struct so_accf *af = so->so_accf;
1950 int error = 0;
 1952 /* do not set/remove accept filters on non-listen sockets */
1953 if ((so->so_options & SO_ACCEPTCONN) == 0) {
1954 error = EINVAL;
1955 goto out;
1958 /* removing the filter */
1959 if (sopt == NULL) {
1960 if (af != NULL) {
1961 if (af->so_accept_filter != NULL &&
1962 af->so_accept_filter->accf_destroy != NULL) {
1963 af->so_accept_filter->accf_destroy(so);
1965 if (af->so_accept_filter_str != NULL) {
1966 kfree(af->so_accept_filter_str, M_ACCF);
1968 kfree(af, M_ACCF);
1969 so->so_accf = NULL;
1971 so->so_options &= ~SO_ACCEPTFILTER;
1972 return (0);
1974 /* adding a filter */
1975 /* must remove previous filter first */
1976 if (af != NULL) {
1977 error = EINVAL;
1978 goto out;
1980 /* don't put large objects on the kernel stack */
1981 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
1982 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1983 afap->af_name[sizeof(afap->af_name)-1] = '\0';
1984 afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1985 if (error)
1986 goto out;
1987 afp = accept_filt_get(afap->af_name);
1988 if (afp == NULL) {
1989 error = ENOENT;
1990 goto out;
1992 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
1993 if (afp->accf_create != NULL) {
1994 if (afap->af_name[0] != '\0') {
1995 int len = strlen(afap->af_name) + 1;
1997 af->so_accept_filter_str = kmalloc(len, M_ACCF,
1998 M_WAITOK);
1999 strcpy(af->so_accept_filter_str, afap->af_name);
2001 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
2002 if (af->so_accept_filter_arg == NULL) {
2003 kfree(af->so_accept_filter_str, M_ACCF);
2004 kfree(af, M_ACCF);
2005 so->so_accf = NULL;
2006 error = EINVAL;
2007 goto out;
2010 af->so_accept_filter = afp;
2011 so->so_accf = af;
2012 so->so_options |= SO_ACCEPTFILTER;
2013 out:
2014 if (afap != NULL)
2015 kfree(afap, M_TEMP);
2016 return (error);
2018 #endif /* INET */
2021 * Perhaps this routine, and sooptcopyout(), below, ought to come in
2022 * an additional variant to handle the case where the option value needs
2023 * to be some kind of integer, but not a specific size.
2024 * In addition to their use here, these functions are also called by the
2025 * protocol-level pr_ctloutput() routines.
2028 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2030 return soopt_to_kbuf(sopt, buf, len, minlen);
2034 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2036 size_t valsize;
2038 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
2039 KKASSERT(kva_p(buf));
2042 * If the user gives us more than we wanted, we ignore it,
2043 * but if we don't get the minimum length the caller
2044 * wants, we return EINVAL. On success, sopt->sopt_valsize
2045 * is set to however much we actually retrieved.
2047 if ((valsize = sopt->sopt_valsize) < minlen)
2048 return EINVAL;
2049 if (valsize > len)
2050 sopt->sopt_valsize = valsize = len;
2052 bcopy(sopt->sopt_val, buf, valsize);
2053 return 0;
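/*
 * Typical consumer pattern (a minimal sketch; the real callers are the
 * protocol pr_ctloutput() routines and sosetopt() below):
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	(validate and apply optval)
 */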
2058 sosetopt(struct socket *so, struct sockopt *sopt)
2060 int error, optval;
2061 struct linger l;
2062 struct timeval tv;
2063 u_long val;
2064 struct signalsockbuf *sotmp;
2066 error = 0;
2067 sopt->sopt_dir = SOPT_SET;
2068 if (sopt->sopt_level != SOL_SOCKET) {
2069 if (so->so_proto && so->so_proto->pr_ctloutput) {
2070 return (so_pr_ctloutput(so, sopt));
2072 error = ENOPROTOOPT;
2073 } else {
2074 switch (sopt->sopt_name) {
2075 #ifdef INET
2076 case SO_ACCEPTFILTER:
2077 error = do_setopt_accept_filter(so, sopt);
2078 if (error)
2079 goto bad;
2080 break;
2081 #endif /* INET */
2082 case SO_LINGER:
2083 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2084 if (error)
2085 goto bad;
2087 so->so_linger = l.l_linger;
2088 if (l.l_onoff)
2089 so->so_options |= SO_LINGER;
2090 else
2091 so->so_options &= ~SO_LINGER;
2092 break;
2094 case SO_DEBUG:
2095 case SO_KEEPALIVE:
2096 case SO_DONTROUTE:
2097 case SO_USELOOPBACK:
2098 case SO_BROADCAST:
2099 case SO_REUSEADDR:
2100 case SO_REUSEPORT:
2101 case SO_OOBINLINE:
2102 case SO_TIMESTAMP:
2103 case SO_NOSIGPIPE:
2104 error = sooptcopyin(sopt, &optval, sizeof optval,
2105 sizeof optval);
2106 if (error)
2107 goto bad;
2108 if (optval)
2109 so->so_options |= sopt->sopt_name;
2110 else
2111 so->so_options &= ~sopt->sopt_name;
2112 break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so,
				    &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
						&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}
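
/*
 * Illustrative userland sketch, not part of this file: exercising the
 * SO_LINGER and SO_RCVTIMEO paths of sosetopt() above.  The arithmetic
 * in the comment assumes hz = 100, hence ustick = 1000000 / hz = 10000
 * microseconds per tick; actual values depend on the kernel config.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int
example_set_options(int s)
{
	struct linger l = { .l_onoff = 1, .l_linger = 5 };
	/* 2.5s -> val = 2 * hz + 500000 / ustick = 200 + 50 = 250 ticks */
	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
		return (-1);
	/* A non-zero timeout that would round to 0 ticks is clamped to 1. */
	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
		return (-1);
	return (0);
}
#endif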

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
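
/*
 * Illustrative userland sketch, not part of this file: reading options
 * back through sogetopt().  Note the truncation semantics of
 * soopt_from_kbuf() above: the kernel copies out at most *optlen bytes
 * and rewrites *optlen with what was actually copied, so a too-small
 * buffer yields a silently truncated value rather than an error.
 */
#if 0
#include <sys/socket.h>
#include <stdio.h>

static void
example_get_options(int s)
{
	int sndbuf, err;
	socklen_t len;

	len = sizeof(sndbuf);
	if (getsockopt(s, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len) == 0)
		printf("send high-water mark: %d bytes\n", sndbuf);

	/* SO_ERROR reads and clears the pending error (see above). */
	len = sizeof(err);
	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
		printf("pending socket error: %d\n", err);
}
#endif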

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	/* the chain should have been allocated large enough at ip6_sooptmcopyin() */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* userland should have supplied a large enough sockopt buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
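
/*
 * Illustrative sketch only, not part of this file: how the three mbuf
 * compatibility helpers above fit together for a legacy ctloutput path
 * that still operates on mbuf chains.  The handler and the
 * legacy_ctloutput_m() callee are hypothetical, and chain ownership
 * on the error paths is elided for brevity.
 */
#if 0
static int
example_mbuf_roundtrip(struct socket *so, struct sockopt *sopt)
{
	struct mbuf *m;
	int error;

	/* Size an mbuf chain to hold sopt->sopt_valsize bytes. */
	error = soopt_getm(sopt, &m);
	if (error)
		return (error);
	/* Copy the option value into the chain ... */
	soopt_to_mbuf(sopt, m);
	/* ... hand it to a legacy mbuf-based handler (hypothetical) ... */
	error = legacy_ctloutput_m(so, sopt, &m);
	/* ... and copy the possibly rewritten chain back out on a get. */
	if (error == 0 && sopt->sopt_dir == SOPT_GET)
		error = soopt_from_mbuf(sopt, m);
	return (error);
}
#endif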

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}
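
/*
 * Illustrative userland sketch, not part of this file: the receiving
 * side of the notification above.  A peer's send(..., MSG_OOB) reaches
 * sohasoutofband() via the protocol, delivering SIGURG to the owner set
 * with F_SETOWN (so_sigio) and waking NOTE_OOB kevent waiters.
 */
#if 0
#include <sys/socket.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
on_urg(int sig)
{
	/* a real handler would recv(s, buf, 1, MSG_OOB) here */
}

static void
example_arm_sigurg(int s)
{
	signal(SIGURG, on_urg);
	/* Route SIGURG for s to this process (F_SETOWN sets so_sigio). */
	fcntl(s, F_SETOWN, getpid());
}
#endif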
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}
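
/*
 * Illustrative userland sketch, not part of this file: registering the
 * filters attached by sokqfilter() above.  On a listening socket
 * EVFILT_READ gets solisten_filtops and kn_data reports the completed
 * connection queue length; on a data socket NOTE_LOWAT substitutes a
 * per-knote low-water mark for ssb_lowat (see filt_soread() below).
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
example_wait_readable(int s)
{
	struct kevent kev, ev;
	int kq = kqueue();

	if (kq < 0)
		return (-1);
	/* Trigger only once at least 128 bytes are buffered. */
	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
	if (kevent(kq, &kev, 1, &ev, 1, NULL) < 1)
		return (-1);
	return ((int)ev.data);	/* bytes readable (or backlog length) */
}
#endif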

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}