/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the interface to attempt to free a
 * socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 */
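
/*
 * Illustrative sketch, not compiled as part of this file: a typical kernel
 * consumer pairs socreate() with soclose(), relying on the single reference
 * socreate() returns.  Hypothetical consumer code, assuming a valid thread
 * context 'td':
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	// ... use the socket; the reference from socreate() is ours ...
 *	soclose(so);	// drops the last reference; sofree() runs when safe
 */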
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_IA32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

extern struct sysentvec ia32_freebsd_sysvec;
#endif
static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

int	maxsockets;

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */
/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
/*
 * Initialise maxsockets.
 */
static void
init_maxsockets(void *ignored)
{
	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
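
/*
 * Illustrative note, not compiled: maxsockets can only grow at runtime.
 * Setting "kern.ipc.maxsockets=65536" in loader.conf takes effect through
 * the tunable fetch above, while "sysctl kern.ipc.maxsockets=65536" goes
 * through sysctl_maxsockets(), which rejects attempts to shrink the limit
 * with EINVAL and scales maxfiles/maxfilesperproc up as needed.
 */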
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(void)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
	mtx_unlock(&so_global_mtx);
	return (so);
}
/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}
/*
 * socreate() returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc();
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}
#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
#else
	if (over)
#endif
		return (NULL);
	so = soalloc();
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	SOCK_LOCK(head);
	mac_socket_newconn(head, so);
	SOCK_UNLOCK(head);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;
			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}
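
/*
 * Illustrative sketch, not compiled: a protocol's input path typically
 * calls sonewconn() when a connection request arrives on a listening
 * socket, along the lines of (hypothetical protocol code):
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		goto drop;	// queue over limit; drop the request
 *	// ... protocol completes its handshake on 'so', then marks it
 *	// ready, e.g. via soisconnected(), which moves it from the
 *	// incomplete to the complete queue for accept() ...
 */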
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
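
/*
 * Illustrative sketch, not compiled: the expected call sequence from a
 * protocol's pru_listen implementation (hypothetical protocol code):
 *
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);	// fails if already connected
 *	if (error == 0)
 *		solisten_proto(so, backlog);	// clamps backlog to somaxconn
 *	SOCK_UNLOCK(so);
 *
 * With this clamping, listen(fd, -1) or an oversized backlog from userland
 * both yield a queue limit of kern.ipc.somaxconn.
 */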
/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct calls to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}
#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats {
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *retmp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid>=PAGE_SIZE &&
			    *space>=PAGE_SIZE &&
			    uio->uio_iov->iov_len>=PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /*ZERO_COPY_SOCKETS*/

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef ZERO_COPY_SOCKETS
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef ZERO_COPY_SOCKETS
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT; /* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	/* XXXRW: Temporary debugging. */
	KASSERT(so->so_proto->pr_usrreqs->pru_sosend != sosend,
	    ("sosend: protocol calls sosend"));

	return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td));
}
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAIT, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
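
/*
 * Illustrative sketch, not compiled: after a reader frees the lead mbuf of
 * the first record, the usual pattern (as used in soreceive_generic()
 * below) is
 *
 *	nextrecord = m->m_nextpkt;	// cache before unlinking
 *	sbfree(&so->so_rcv, m);
 *	so->so_rcv.sb_mb = m_free(m);	// new lead mbuf (or NULL)
 *	sockbuf_pushsync(&so->so_rcv, nextrecord);
 *
 * which re-links 'nextrecord' behind the new lead mbuf and refreshes
 * sb_mbtail/sb_lastrecord if the buffer emptied or the record became last.
 */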
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
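/*
 * Illustrative sketch, not compiled: for a PR_ADDR protocol such as UDP,
 * the record layout this code depends on looks like
 *
 *	sb_mb -> [MT_SONAME] -> [MT_CONTROL]* -> [MT_DATA] ...  (one record)
 *	             |
 *	         m_nextpkt -> [MT_SONAME] -> ...                (next record)
 *
 * with mbufs within a record linked through m_next, and records linked
 * through m_nextpkt of each record's first mbuf.
 */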
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				    && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_WAIT;
					if (copy_flag == M_WAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_WAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then don't need to generate ACK to peer to update window,
		 * since ACK will be generated on return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		goto restart;
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	return (error);
}
1849 * Optimized version of soreceive() for simple datagram cases from userspace;
1850 * this is experimental, and while heavily tested, may contain errors.
1853 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1854 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1856 struct mbuf *m, *m2;
1857 int flags, len, error, offset;
1858 struct protosw *pr = so->so_proto;
1859 struct mbuf *nextrecord;
1861 if (psa != NULL)
1862 *psa = NULL;
1863 if (controlp != NULL)
1864 *controlp = NULL;
1865 if (flagsp != NULL)
1866 flags = *flagsp &~ MSG_EOR;
1867 else
1868 flags = 0;
1870 /*
1871 * For any complicated cases, fall back to the full
1872 * soreceive_generic().
1873 */
1874 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
1875 return (soreceive_generic(so, psa, uio, mp0, controlp,
1876 flagsp));
1878 /*
1879 * Enforce restrictions on use.
1880 */
1881 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
1882 ("soreceive_dgram: wantrcvd"));
1883 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
1884 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
1885 ("soreceive_dgram: SBS_RCVATMARK"));
1886 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
1887 ("soreceive_dgram: PR_CONNREQUIRED"));
1889 restart:
1890 SOCKBUF_LOCK(&so->so_rcv);
1891 m = so->so_rcv.sb_mb;
1893 /*
1894 * If we have less data than requested, block awaiting more (subject
1895 * to any timeout) if:
1896 * 1. the current count is less than the low water mark, or
1897 * 2. MSG_WAITALL is set, and it is possible to do the entire
1898 * receive operation at once if we block (resid <= hiwat), or
1899 * 3. MSG_DONTWAIT is not set.
1900 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1901 * we have to do the receive in sections, and thus risk returning a
1902 * short count if a timeout or signal occurs after we start.
1903 */
1904 if (m == NULL) {
1905 KASSERT(m != NULL || !so->so_rcv.sb_cc,
1906 ("receive: m == %p so->so_rcv.sb_cc == %u",
1907 m, so->so_rcv.sb_cc));
1908 if (so->so_error) {
1909 if (m != NULL)
1910 goto dontblock;
1911 error = so->so_error;
1912 so->so_error = 0;
1913 SOCKBUF_UNLOCK(&so->so_rcv);
1914 return (error);
1915 }
1916 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1917 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1918 if (m == NULL) {
1919 SOCKBUF_UNLOCK(&so->so_rcv);
1920 return (0);
1921 } else
1922 goto dontblock;
1923 }
1924 if (uio->uio_resid == 0) {
1925 SOCKBUF_UNLOCK(&so->so_rcv);
1926 return (0);
1927 }
1928 if ((so->so_state & SS_NBIO) ||
1929 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1930 SOCKBUF_UNLOCK(&so->so_rcv);
1931 error = EWOULDBLOCK;
1932 return (error);
1933 }
1934 SBLASTRECORDCHK(&so->so_rcv);
1935 SBLASTMBUFCHK(&so->so_rcv);
1937 error = sbwait(&so->so_rcv);
1938 SOCKBUF_UNLOCK(&so->so_rcv);
1939 if (error)
1940 return (error);
1941 goto restart;
1942 }
1943 dontblock:
1944 /*
1945 * From this point onward, we maintain 'nextrecord' as a cache of the
1946 * pointer to the next record in the socket buffer. We must keep the
1947 * various socket buffer pointers and local stack versions of the
1948 * pointers in sync, pushing out modifications before dropping the
1949 * socket buffer mutex, and re-reading them when picking it up.
1950 *
1951 * Otherwise, we will race with the network stack appending new data
1952 * or records onto the socket buffer by using inconsistent/stale
1953 * versions of the field, possibly resulting in socket buffer
1954 * corruption.
1955 *
1956 * By holding the high-level sblock(), we prevent simultaneous
1957 * readers from pulling off the front of the socket buffer.
1958 */
1959 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1960 if (uio->uio_td)
1961 uio->uio_td->td_ru.ru_msgrcv++;
1962 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1963 SBLASTRECORDCHK(&so->so_rcv);
1964 SBLASTMBUFCHK(&so->so_rcv);
1965 nextrecord = m->m_nextpkt;
1966 if (pr->pr_flags & PR_ADDR) {
1967 KASSERT(m->m_type == MT_SONAME,
1968 ("m->m_type == %d", m->m_type));
1969 if (psa != NULL)
1970 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1971 M_NOWAIT);
1972 sbfree(&so->so_rcv, m);
1973 so->so_rcv.sb_mb = m_free(m);
1974 m = so->so_rcv.sb_mb;
1975 sockbuf_pushsync(&so->so_rcv, nextrecord);
1976 }
1977 if (m == NULL) {
1978 /* XXXRW: Can this happen? */
1979 SOCKBUF_UNLOCK(&so->so_rcv);
1980 return (0);
1981 }
1982 KASSERT(m->m_nextpkt == nextrecord,
1983 ("soreceive: post-control, nextrecord !sync"));
1984 if (nextrecord == NULL) {
1985 KASSERT(so->so_rcv.sb_mb == m,
1986 ("soreceive: post-control, sb_mb!=m"));
1987 KASSERT(so->so_rcv.sb_lastrecord == m,
1988 ("soreceive: post-control, lastrecord!=m"));
1991 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1992 SBLASTRECORDCHK(&so->so_rcv);
1993 SBLASTMBUFCHK(&so->so_rcv);
1994 KASSERT(m == so->so_rcv.sb_mb, ("soreceive_dgram: m not sb_mb"));
1995 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
1996 ("soreceive_dgram: m_nextpkt != nextrecord"));
1998 /*
1999 * Pull 'm' and its chain off the front of the packet queue.
2000 */
2001 so->so_rcv.sb_mb = NULL;
2002 sockbuf_pushsync(&so->so_rcv, nextrecord);
2004 /*
2005 * Walk 'm's chain and free that many bytes from the socket buffer.
2006 */
2007 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2008 sbfree(&so->so_rcv, m2);
2010 /*
2011 * Do a few last checks before we let go of the lock.
2012 */
2013 SBLASTRECORDCHK(&so->so_rcv);
2014 SBLASTMBUFCHK(&so->so_rcv);
2015 SOCKBUF_UNLOCK(&so->so_rcv);
2017 /*
2018 * Packet to copyout() is now in 'm' and it is disconnected from the
2019 * queue.
2020 *
2021 * Process one or more MT_CONTROL mbufs present before any data mbufs
2022 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
2023 * just copy the data; if !MSG_PEEK, we call into the protocol to
2024 * perform externalization (or freeing if controlp == NULL).
2025 */
2026 if (m->m_type == MT_CONTROL) {
2027 struct mbuf *cm = NULL, *cmn;
2028 struct mbuf **cme = &cm;
2030 do {
2031 m2 = m->m_next;
2032 m->m_next = NULL;
2033 *cme = m;
2034 cme = &(*cme)->m_next;
2035 m = m2;
2036 } while (m != NULL && m->m_type == MT_CONTROL);
2037 while (cm != NULL) {
2038 cmn = cm->m_next;
2039 cm->m_next = NULL;
2040 if (pr->pr_domain->dom_externalize != NULL) {
2041 error = (*pr->pr_domain->dom_externalize)
2042 (cm, controlp);
2043 } else if (controlp != NULL)
2044 *controlp = cm;
2045 else
2046 m_freem(cm);
2047 if (controlp != NULL) {
2048 while (*controlp != NULL)
2049 controlp = &(*controlp)->m_next;
2050 }
2051 cm = cmn;
2052 }
2053 }
2055 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));
2057 offset = 0;
2058 while (m != NULL && uio->uio_resid > 0) {
2059 len = uio->uio_resid;
2060 if (len > m->m_len)
2061 len = m->m_len;
2062 error = uiomove(mtod(m, char *), (int)len, uio);
2063 if (error) {
2064 m_freem(m);
2065 return (error);
2066 }
2067 m = m_free(m);
2068 }
2069 if (m != NULL && pr->pr_flags & PR_ATOMIC)
2070 flags |= MSG_TRUNC;
2071 m_freem(m);
2072 if (flagsp != NULL)
2073 *flagsp |= flags;
2074 return (0);
2075 }
2077 int
2078 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2079 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2080 {
2082 /* XXXRW: Temporary debugging. */
2083 KASSERT(so->so_proto->pr_usrreqs->pru_soreceive != soreceive,
2084 ("soreceive: protocol calls soreceive"));
2086 return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2087 controlp, flagsp));
2088 }
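/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a kernel consumer might read into a kernel buffer through the soreceive()
 * switch above. The socket and buffer are assumed to exist already; error
 * handling is elided.
 */
#if 0
static int
example_kernel_read(struct socket *so, void *buf, size_t buflen)
{
	struct uio auio;
	struct iovec aiov;
	int flags = 0;

	aiov.iov_base = buf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	auio.uio_segflg = UIO_SYSSPACE;	/* kernel, not user, addresses */
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}
#endif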
2090 int
2091 soshutdown(struct socket *so, int how)
2092 {
2093 struct protosw *pr = so->so_proto;
2095 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2096 return (EINVAL);
2097 if (pr->pr_usrreqs->pru_flush != NULL) {
2098 (*pr->pr_usrreqs->pru_flush)(so, how);
2099 }
2100 if (how != SHUT_WR)
2101 sorflush(so);
2102 if (how != SHUT_RD)
2103 return ((*pr->pr_usrreqs->pru_shutdown)(so));
2104 return (0);
2105 }
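/*
 * Editorial example (not part of the original file): a kernel consumer that
 * is finished sending but still expects data could shut down just the write
 * side, mirroring shutdown(2) with SHUT_WR; "error" is a local of the caller.
 */
#if 0
	error = soshutdown(so, SHUT_WR);
#endif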
2107 void
2108 sorflush(struct socket *so)
2109 {
2110 struct sockbuf *sb = &so->so_rcv;
2111 struct protosw *pr = so->so_proto;
2112 struct sockbuf asb;
2114 /*
2115 * In order to avoid calling dom_dispose with the socket buffer mutex
2116 * held, and in order to generally avoid holding the lock for a long
2117 * time, we make a copy of the socket buffer and clear the original
2118 * (except locks, state). The new socket buffer copy won't have
2119 * initialized locks so we can only call routines that won't use or
2120 * assert those locks.
2121 *
2122 * Dislodge threads currently blocked in receive and wait to acquire
2123 * a lock against other simultaneous readers before clearing the
2124 * socket buffer. Don't let our acquire be interrupted by a signal
2125 * despite any existing socket disposition on interruptible waiting.
2126 */
2127 socantrcvmore(so);
2128 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2130 /*
2131 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2132 * and mutex data unchanged.
2133 */
2134 SOCKBUF_LOCK(sb);
2135 bzero(&asb, offsetof(struct sockbuf, sb_startzero));
2136 bcopy(&sb->sb_startzero, &asb.sb_startzero,
2137 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2138 bzero(&sb->sb_startzero,
2139 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2140 SOCKBUF_UNLOCK(sb);
2141 sbunlock(sb);
2143 /*
2144 * Dispose of special rights and flush the socket buffer. Don't call
2145 * any unsafe routines (that rely on locks being initialized) on asb.
2146 */
2147 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2148 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
2149 sbrelease_internal(&asb, so);
2150 }
2152 /*
2153 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2154 * additional variant to handle the case where the option value needs to be
2155 * some kind of integer, but not a specific size. In addition to their use
2156 * here, these functions are also called by the protocol-level pr_ctloutput()
2157 * routines.
2158 */
2159 int
2160 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2161 {
2162 size_t valsize;
2164 /*
2165 * If the user gives us more than we wanted, we ignore it, but if we
2166 * don't get the minimum length the caller wants, we return EINVAL.
2167 * On success, sopt->sopt_valsize is set to however much we actually
2168 * retrieved.
2169 */
2170 if ((valsize = sopt->sopt_valsize) < minlen)
2171 return EINVAL;
2172 if (valsize > len)
2173 sopt->sopt_valsize = valsize = len;
2175 if (sopt->sopt_td != NULL)
2176 return (copyin(sopt->sopt_val, buf, valsize));
2178 bcopy(sopt->sopt_val, buf, valsize);
2179 return (0);
2180 }
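/*
 * Editorial example (not part of the original file): the typical set-side
 * calling pattern from a protocol's pr_ctloutput() handler, copying in an
 * int-sized option value regardless of whether the request originated in
 * user space (sopt_td != NULL) or the kernel.
 */
#if 0
	int optval;

	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error)
		return (error);
#endif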
2182 /*
2183 * Kernel version of setsockopt(2).
2184 *
2185 * XXX: optlen is size_t, not socklen_t
2186 */
2187 int
2188 so_setsockopt(struct socket *so, int level, int optname, void *optval,
2189 size_t optlen)
2190 {
2191 struct sockopt sopt;
2193 sopt.sopt_level = level;
2194 sopt.sopt_name = optname;
2195 sopt.sopt_dir = SOPT_SET;
2196 sopt.sopt_val = optval;
2197 sopt.sopt_valsize = optlen;
2198 sopt.sopt_td = NULL;
2199 return (sosetopt(so, &sopt));
2200 }
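/*
 * Editorial example (not part of the original file): an in-kernel consumer
 * enlarging a socket's send buffer without going through the system call
 * layer; "error" is a local of the caller.
 */
#if 0
	int bufsize = 65536;

	error = so_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &bufsize,
	    sizeof(bufsize));
#endif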
2202 int
2203 sosetopt(struct socket *so, struct sockopt *sopt)
2204 {
2205 int error, optval;
2206 struct linger l;
2207 struct timeval tv;
2208 u_long val;
2209 #ifdef MAC
2210 struct mac extmac;
2211 #endif
2213 error = 0;
2214 if (sopt->sopt_level != SOL_SOCKET) {
2215 if (so->so_proto && so->so_proto->pr_ctloutput)
2216 return ((*so->so_proto->pr_ctloutput)
2217 (so, sopt));
2218 error = ENOPROTOOPT;
2219 } else {
2220 switch (sopt->sopt_name) {
2221 #ifdef INET
2222 case SO_ACCEPTFILTER:
2223 error = do_setopt_accept_filter(so, sopt);
2224 if (error)
2225 goto bad;
2226 break;
2227 #endif
2228 case SO_LINGER:
2229 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2230 if (error)
2231 goto bad;
2233 SOCK_LOCK(so);
2234 so->so_linger = l.l_linger;
2235 if (l.l_onoff)
2236 so->so_options |= SO_LINGER;
2237 else
2238 so->so_options &= ~SO_LINGER;
2239 SOCK_UNLOCK(so);
2240 break;
2242 case SO_DEBUG:
2243 case SO_KEEPALIVE:
2244 case SO_DONTROUTE:
2245 case SO_USELOOPBACK:
2246 case SO_BROADCAST:
2247 case SO_REUSEADDR:
2248 case SO_REUSEPORT:
2249 case SO_OOBINLINE:
2250 case SO_TIMESTAMP:
2251 case SO_BINTIME:
2252 case SO_NOSIGPIPE:
2253 error = sooptcopyin(sopt, &optval, sizeof optval,
2254 sizeof optval);
2255 if (error)
2256 goto bad;
2257 SOCK_LOCK(so);
2258 if (optval)
2259 so->so_options |= sopt->sopt_name;
2260 else
2261 so->so_options &= ~sopt->sopt_name;
2262 SOCK_UNLOCK(so);
2263 break;
2265 case SO_SETFIB:
2266 error = sooptcopyin(sopt, &optval, sizeof optval,
2267 sizeof optval);
2268 if (optval < 1 || optval > rt_numfibs) {
2269 error = EINVAL;
2270 goto bad;
2271 }
2272 if ((so->so_proto->pr_domain->dom_family == PF_INET) ||
2273 (so->so_proto->pr_domain->dom_family == PF_ROUTE)) {
2274 so->so_fibnum = optval;
2275 } else {
2276 so->so_fibnum = 0;
2277 }
2278 break;
2279 case SO_SNDBUF:
2280 case SO_RCVBUF:
2281 case SO_SNDLOWAT:
2282 case SO_RCVLOWAT:
2283 error = sooptcopyin(sopt, &optval, sizeof optval,
2284 sizeof optval);
2285 if (error)
2286 goto bad;
2289 * Values < 1 make no sense for any of these options,
2290 * so disallow them.
2292 if (optval < 1) {
2293 error = EINVAL;
2294 goto bad;
2295 }
2297 switch (sopt->sopt_name) {
2298 case SO_SNDBUF:
2299 case SO_RCVBUF:
2300 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2301 &so->so_snd : &so->so_rcv, (u_long)optval,
2302 so, curthread) == 0) {
2303 error = ENOBUFS;
2304 goto bad;
2306 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2307 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2308 break;
2310 /*
2311 * Make sure the low-water is never greater than the
2312 * high-water.
2313 */
2314 case SO_SNDLOWAT:
2315 SOCKBUF_LOCK(&so->so_snd);
2316 so->so_snd.sb_lowat =
2317 (optval > so->so_snd.sb_hiwat) ?
2318 so->so_snd.sb_hiwat : optval;
2319 SOCKBUF_UNLOCK(&so->so_snd);
2320 break;
2321 case SO_RCVLOWAT:
2322 SOCKBUF_LOCK(&so->so_rcv);
2323 so->so_rcv.sb_lowat =
2324 (optval > so->so_rcv.sb_hiwat) ?
2325 so->so_rcv.sb_hiwat : optval;
2326 SOCKBUF_UNLOCK(&so->so_rcv);
2327 break;
2328 }
2329 break;
2331 case SO_SNDTIMEO:
2332 case SO_RCVTIMEO:
2333 #ifdef COMPAT_IA32
2334 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2335 struct timeval32 tv32;
2337 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2338 sizeof tv32);
2339 CP(tv32, tv, tv_sec);
2340 CP(tv32, tv, tv_usec);
2341 } else
2342 #endif
2343 error = sooptcopyin(sopt, &tv, sizeof tv,
2344 sizeof tv);
2345 if (error)
2346 goto bad;
2348 /* assert(hz > 0); */
2349 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2350 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2351 error = EDOM;
2352 goto bad;
2353 }
2354 /* assert(tick > 0); */
2355 /* assert(ULONG_MAX - INT_MAX >= 1000000); */
2356 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
2357 if (val > INT_MAX) {
2358 error = EDOM;
2359 goto bad;
2360 }
2361 if (val == 0 && tv.tv_usec != 0)
2362 val = 1;
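/*
 * Editorial worked example (not part of the original file): with hz = 1000
 * (so tick = 1000 us), a timeout of tv = { 2, 500000 } converts to
 * val = 2 * 1000 + 500000 / 1000 = 2500 ticks; a sub-tick request such as
 * tv = { 0, 200 } computes to 0 and is rounded up to 1 tick by the clamp
 * just above, so the timeout is not silently disabled.
 */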
2364 switch (sopt->sopt_name) {
2365 case SO_SNDTIMEO:
2366 so->so_snd.sb_timeo = val;
2367 break;
2368 case SO_RCVTIMEO:
2369 so->so_rcv.sb_timeo = val;
2370 break;
2371 }
2372 break;
2374 case SO_LABEL:
2375 #ifdef MAC
2376 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2377 sizeof extmac);
2378 if (error)
2379 goto bad;
2380 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2381 so, &extmac);
2382 #else
2383 error = EOPNOTSUPP;
2384 #endif
2385 break;
2387 default:
2388 error = ENOPROTOOPT;
2389 break;
2390 }
2391 if (error == 0 && so->so_proto != NULL &&
2392 so->so_proto->pr_ctloutput != NULL) {
2393 (void) ((*so->so_proto->pr_ctloutput)
2394 (so, sopt));
2395 }
2396 }
2397 bad:
2398 return (error);
2399 }
2401 /*
2402 * Helper routine for getsockopt.
2403 */
2404 int
2405 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2406 {
2407 int error;
2408 size_t valsize;
2410 error = 0;
2412 /*
2413 * Documented get behavior is that we always return a value, possibly
2414 * truncated to fit in the user's buffer. Traditional behavior is
2415 * that we always tell the user precisely how much we copied, rather
2416 * than something useful like the total amount we had available for
2417 * her. Note that this interface is not idempotent; the entire
2418 * answer must be generated ahead of time.
2419 */
2420 valsize = min(len, sopt->sopt_valsize);
2421 sopt->sopt_valsize = valsize;
2422 if (sopt->sopt_val != NULL) {
2423 if (sopt->sopt_td != NULL)
2424 error = copyout(buf, sopt->sopt_val, valsize);
2425 else
2426 bcopy(buf, sopt->sopt_val, valsize);
2427 }
2428 return (error);
2429 }
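/*
 * Editorial example (not part of the original file): the matching get-side
 * pattern from a pr_ctloutput() handler, returning an int-sized value and
 * letting sooptcopyout() truncate to the caller's buffer if needed. The
 * "sc->some_option" state is hypothetical.
 */
#if 0
	int optval = sc->some_option;	/* hypothetical per-protocol state */

	error = sooptcopyout(sopt, &optval, sizeof(optval));
#endif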
2431 int
2432 sogetopt(struct socket *so, struct sockopt *sopt)
2433 {
2434 int error, optval;
2435 struct linger l;
2436 struct timeval tv;
2437 #ifdef MAC
2438 struct mac extmac;
2439 #endif
2441 error = 0;
2442 if (sopt->sopt_level != SOL_SOCKET) {
2443 if (so->so_proto && so->so_proto->pr_ctloutput) {
2444 return ((*so->so_proto->pr_ctloutput)
2445 (so, sopt));
2446 } else
2447 return (ENOPROTOOPT);
2448 } else {
2449 switch (sopt->sopt_name) {
2450 #ifdef INET
2451 case SO_ACCEPTFILTER:
2452 error = do_getopt_accept_filter(so, sopt);
2453 break;
2454 #endif
2455 case SO_LINGER:
2456 SOCK_LOCK(so);
2457 l.l_onoff = so->so_options & SO_LINGER;
2458 l.l_linger = so->so_linger;
2459 SOCK_UNLOCK(so);
2460 error = sooptcopyout(sopt, &l, sizeof l);
2461 break;
2463 case SO_USELOOPBACK:
2464 case SO_DONTROUTE:
2465 case SO_DEBUG:
2466 case SO_KEEPALIVE:
2467 case SO_REUSEADDR:
2468 case SO_REUSEPORT:
2469 case SO_BROADCAST:
2470 case SO_OOBINLINE:
2471 case SO_ACCEPTCONN:
2472 case SO_TIMESTAMP:
2473 case SO_BINTIME:
2474 case SO_NOSIGPIPE:
2475 optval = so->so_options & sopt->sopt_name;
2476 integer:
2477 error = sooptcopyout(sopt, &optval, sizeof optval);
2478 break;
2480 case SO_TYPE:
2481 optval = so->so_type;
2482 goto integer;
2484 case SO_ERROR:
2485 SOCK_LOCK(so);
2486 optval = so->so_error;
2487 so->so_error = 0;
2488 SOCK_UNLOCK(so);
2489 goto integer;
2491 case SO_SNDBUF:
2492 optval = so->so_snd.sb_hiwat;
2493 goto integer;
2495 case SO_RCVBUF:
2496 optval = so->so_rcv.sb_hiwat;
2497 goto integer;
2499 case SO_SNDLOWAT:
2500 optval = so->so_snd.sb_lowat;
2501 goto integer;
2503 case SO_RCVLOWAT:
2504 optval = so->so_rcv.sb_lowat;
2505 goto integer;
2507 case SO_SNDTIMEO:
2508 case SO_RCVTIMEO:
2509 optval = (sopt->sopt_name == SO_SNDTIMEO ?
2510 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2512 tv.tv_sec = optval / hz;
2513 tv.tv_usec = (optval % hz) * tick;
2514 #ifdef COMPAT_IA32
2515 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2516 struct timeval32 tv32;
2518 CP(tv, tv32, tv_sec);
2519 CP(tv, tv32, tv_usec);
2520 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2521 } else
2522 #endif
2523 error = sooptcopyout(sopt, &tv, sizeof tv);
2524 break;
2526 case SO_LABEL:
2527 #ifdef MAC
2528 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2529 sizeof(extmac));
2530 if (error)
2531 return (error);
2532 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2533 so, &extmac);
2534 if (error)
2535 return (error);
2536 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2537 #else
2538 error = EOPNOTSUPP;
2539 #endif
2540 break;
2542 case SO_PEERLABEL:
2543 #ifdef MAC
2544 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2545 sizeof(extmac));
2546 if (error)
2547 return (error);
2548 error = mac_getsockopt_peerlabel(
2549 sopt->sopt_td->td_ucred, so, &extmac);
2550 if (error)
2551 return (error);
2552 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2553 #else
2554 error = EOPNOTSUPP;
2555 #endif
2556 break;
2558 case SO_LISTENQLIMIT:
2559 optval = so->so_qlimit;
2560 goto integer;
2562 case SO_LISTENQLEN:
2563 optval = so->so_qlen;
2564 goto integer;
2566 case SO_LISTENINCQLEN:
2567 optval = so->so_incqlen;
2568 goto integer;
2570 default:
2571 error = ENOPROTOOPT;
2572 break;
2573 }
2574 return (error);
2575 }
2576 }
2578 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
2579 int
2580 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2581 {
2582 struct mbuf *m, *m_prev;
2583 int sopt_size = sopt->sopt_valsize;
2585 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
2586 if (m == NULL)
2587 return ENOBUFS;
2588 if (sopt_size > MLEN) {
2589 MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
2590 if ((m->m_flags & M_EXT) == 0) {
2591 m_free(m);
2592 return ENOBUFS;
2593 }
2594 m->m_len = min(MCLBYTES, sopt_size);
2595 } else {
2596 m->m_len = min(MLEN, sopt_size);
2597 }
2598 sopt_size -= m->m_len;
2599 *mp = m;
2600 m_prev = m;
2602 while (sopt_size) {
2603 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
2604 if (m == NULL) {
2605 m_freem(*mp);
2606 return ENOBUFS;
2607 }
2608 if (sopt_size > MLEN) {
2609 MCLGET(m, sopt->sopt_td != NULL ? M_WAIT :
2610 M_DONTWAIT);
2611 if ((m->m_flags & M_EXT) == 0) {
2612 m_freem(m);
2613 m_freem(*mp);
2614 return ENOBUFS;
2615 }
2616 m->m_len = min(MCLBYTES, sopt_size);
2617 } else {
2618 m->m_len = min(MLEN, sopt_size);
2619 }
2620 sopt_size -= m->m_len;
2621 m_prev->m_next = m;
2622 m_prev = m;
2623 }
2624 return (0);
2625 }
2627 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2628 int
2629 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2630 {
2631 struct mbuf *m0 = m;
2633 if (sopt->sopt_val == NULL)
2634 return (0);
2635 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2636 if (sopt->sopt_td != NULL) {
2637 int error;
2639 error = copyin(sopt->sopt_val, mtod(m, char *),
2640 m->m_len);
2641 if (error != 0) {
2642 m_freem(m0);
2643 return (error);
2644 }
2645 } else
2646 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2647 sopt->sopt_valsize -= m->m_len;
2648 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2649 m = m->m_next;
2650 }
2651 if (m != NULL) /* should have been allocated enough at ip6_sooptmcopyin() */
2652 panic("ip6_sooptmcopyin");
2653 return (0);
2654 }
2656 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2657 int
2658 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2659 {
2660 struct mbuf *m0 = m;
2661 size_t valsize = 0;
2663 if (sopt->sopt_val == NULL)
2664 return (0);
2665 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2666 if (sopt->sopt_td != NULL) {
2667 int error;
2669 error = copyout(mtod(m, char *), sopt->sopt_val,
2670 m->m_len);
2671 if (error != 0) {
2672 m_freem(m0);
2673 return (error);
2674 }
2675 } else
2676 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2677 sopt->sopt_valsize -= m->m_len;
2678 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2679 valsize += m->m_len;
2680 m = m->m_next;
2681 }
2682 if (m != NULL) {
2683 /* enough soopt buffer should be given from user-land */
2684 m_freem(m0);
2685 return (EINVAL);
2686 }
2687 sopt->sopt_valsize = valsize;
2688 return (0);
2689 }
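/*
 * Editorial example (not part of the original file): a sketch of how the
 * three compatibility helpers above chain together for a protocol that
 * still processes options as an mbuf chain.
 */
#if 0
	struct mbuf *m = NULL;

	error = soopt_getm(sopt, &m);		/* size and allocate the chain */
	if (error == 0)
		error = soopt_mcopyin(sopt, m);	/* option value -> mbufs */
	if (error == 0) {
		/* ... legacy handler consumes/overwrites the chain ... */
		error = soopt_mcopyout(sopt, m);	/* mbufs -> option value */
	}
#endif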
2691 /*
2692 * sohasoutofband(): protocol notifies socket layer of the arrival of new
2693 * out-of-band data, which will then notify socket consumers.
2694 */
2695 void
2696 sohasoutofband(struct socket *so)
2697 {
2699 if (so->so_sigio != NULL)
2700 pgsigio(&so->so_sigio, SIGURG, 0);
2701 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2702 }
2704 int
2705 sopoll(struct socket *so, int events, struct ucred *active_cred,
2706 struct thread *td)
2707 {
2709 /* XXXRW: Temporary debugging. */
2710 KASSERT(so->so_proto->pr_usrreqs->pru_sopoll != sopoll,
2711 ("sopoll: protocol calls sopoll"));
2713 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
2714 td));
2715 }
2717 int
2718 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
2719 struct thread *td)
2720 {
2721 int revents = 0;
2723 SOCKBUF_LOCK(&so->so_snd);
2724 SOCKBUF_LOCK(&so->so_rcv);
2725 if (events & (POLLIN | POLLRDNORM))
2726 if (soreadable(so))
2727 revents |= events & (POLLIN | POLLRDNORM);
2729 if (events & POLLINIGNEOF)
2730 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2731 !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2732 revents |= POLLINIGNEOF;
2734 if (events & (POLLOUT | POLLWRNORM))
2735 if (sowriteable(so))
2736 revents |= events & (POLLOUT | POLLWRNORM);
2738 if (events & (POLLPRI | POLLRDBAND))
2739 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2740 revents |= events & (POLLPRI | POLLRDBAND);
2742 if (revents == 0) {
2743 if (events &
2744 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2745 POLLRDBAND)) {
2746 selrecord(td, &so->so_rcv.sb_sel);
2747 so->so_rcv.sb_flags |= SB_SEL;
2748 }
2750 if (events & (POLLOUT | POLLWRNORM)) {
2751 selrecord(td, &so->so_snd.sb_sel);
2752 so->so_snd.sb_flags |= SB_SEL;
2753 }
2754 }
2756 SOCKBUF_UNLOCK(&so->so_rcv);
2757 SOCKBUF_UNLOCK(&so->so_snd);
2758 return (revents);
2759 }
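/*
 * Editorial example (not part of the original file): a kernel caller can use
 * the sopoll() switch above to test readability without sleeping, much like
 * poll(2) with a zero timeout.
 */
#if 0
	int revents;

	revents = sopoll(so, POLLIN | POLLRDNORM, curthread->td_ucred,
	    curthread);
	if (revents & (POLLIN | POLLRDNORM)) {
		/* data (or EOF) is available; soreceive() won't block */
	}
#endif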
2761 int
2762 soo_kqfilter(struct file *fp, struct knote *kn)
2763 {
2764 struct socket *so = kn->kn_fp->f_data;
2765 struct sockbuf *sb;
2767 switch (kn->kn_filter) {
2768 case EVFILT_READ:
2769 if (so->so_options & SO_ACCEPTCONN)
2770 kn->kn_fop = &solisten_filtops;
2771 else
2772 kn->kn_fop = &soread_filtops;
2773 sb = &so->so_rcv;
2774 break;
2775 case EVFILT_WRITE:
2776 kn->kn_fop = &sowrite_filtops;
2777 sb = &so->so_snd;
2778 break;
2779 default:
2780 return (EINVAL);
2781 }
2783 SOCKBUF_LOCK(sb);
2784 knlist_add(&sb->sb_sel.si_note, kn, 1);
2785 sb->sb_flags |= SB_KNOTE;
2786 SOCKBUF_UNLOCK(sb);
2787 return (0);
2788 }
2790 /*
2791 * Some routines that return EOPNOTSUPP for entry points that are not
2792 * supported by a protocol. Fill in as needed.
2793 */
2794 int
2795 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
2796 {
2798 return EOPNOTSUPP;
2799 }
2801 int
2802 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
2803 {
2805 return EOPNOTSUPP;
2806 }
2808 int
2809 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
2810 {
2812 return EOPNOTSUPP;
2813 }
2815 int
2816 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
2817 {
2819 return EOPNOTSUPP;
2820 }
2822 int
2823 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
2824 {
2826 return EOPNOTSUPP;
2827 }
2829 int
2830 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
2831 struct ifnet *ifp, struct thread *td)
2832 {
2834 return EOPNOTSUPP;
2835 }
2837 int
2838 pru_disconnect_notsupp(struct socket *so)
2839 {
2841 return EOPNOTSUPP;
2842 }
2844 int
2845 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
2846 {
2848 return EOPNOTSUPP;
2849 }
2851 int
2852 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
2853 {
2855 return EOPNOTSUPP;
2856 }
2858 int
2859 pru_rcvd_notsupp(struct socket *so, int flags)
2860 {
2862 return EOPNOTSUPP;
2863 }
2865 int
2866 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
2867 {
2869 return EOPNOTSUPP;
2870 }
2872 int
2873 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
2874 struct sockaddr *addr, struct mbuf *control, struct thread *td)
2875 {
2877 return EOPNOTSUPP;
2878 }
2880 /*
2881 * This isn't really a ``null'' operation, but it's the default one and
2882 * doesn't do anything destructive.
2883 */
2884 int
2885 pru_sense_null(struct socket *so, struct stat *sb)
2886 {
2888 sb->st_blksize = so->so_snd.sb_hiwat;
2889 return 0;
2890 }
2892 int
2893 pru_shutdown_notsupp(struct socket *so)
2894 {
2896 return EOPNOTSUPP;
2897 }
2899 int
2900 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
2901 {
2903 return EOPNOTSUPP;
2904 }
2906 int
2907 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
2908 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2909 {
2911 return EOPNOTSUPP;
2912 }
2914 int
2915 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
2916 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2917 {
2919 return EOPNOTSUPP;
2920 }
2922 int
2923 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
2924 struct thread *td)
2925 {
2927 return EOPNOTSUPP;
2928 }
2930 static void
2931 filt_sordetach(struct knote *kn)
2932 {
2933 struct socket *so = kn->kn_fp->f_data;
2935 SOCKBUF_LOCK(&so->so_rcv);
2936 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2937 if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2938 so->so_rcv.sb_flags &= ~SB_KNOTE;
2939 SOCKBUF_UNLOCK(&so->so_rcv);
2940 }
2942 /*ARGSUSED*/
2943 static int
2944 filt_soread(struct knote *kn, long hint)
2945 {
2946 struct socket *so;
2948 so = kn->kn_fp->f_data;
2949 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2951 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2952 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2953 kn->kn_flags |= EV_EOF;
2954 kn->kn_fflags = so->so_error;
2955 return (1);
2956 } else if (so->so_error) /* temporary udp error */
2957 return (1);
2958 else if (kn->kn_sfflags & NOTE_LOWAT)
2959 return (kn->kn_data >= kn->kn_sdata);
2960 else
2961 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2962 }
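/*
 * Editorial example (not part of the original file): the NOTE_LOWAT branch
 * above is what a userland consumer exercises when registering a read filter
 * with a custom low-water mark, e.g.:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The knote then fires only once at least 128 bytes are queued (or on EOF
 * or a pending socket error).
 */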
2964 static void
2965 filt_sowdetach(struct knote *kn)
2966 {
2967 struct socket *so = kn->kn_fp->f_data;
2969 SOCKBUF_LOCK(&so->so_snd);
2970 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2971 if (knlist_empty(&so->so_snd.sb_sel.si_note))
2972 so->so_snd.sb_flags &= ~SB_KNOTE;
2973 SOCKBUF_UNLOCK(&so->so_snd);
2974 }
2976 /*ARGSUSED*/
2977 static int
2978 filt_sowrite(struct knote *kn, long hint)
2979 {
2980 struct socket *so;
2982 so = kn->kn_fp->f_data;
2983 SOCKBUF_LOCK_ASSERT(&so->so_snd);
2984 kn->kn_data = sbspace(&so->so_snd);
2985 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2986 kn->kn_flags |= EV_EOF;
2987 kn->kn_fflags = so->so_error;
2988 return (1);
2989 } else if (so->so_error) /* temporary udp error */
2990 return (1);
2991 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2992 (so->so_proto->pr_flags & PR_CONNREQUIRED))
2993 return (0);
2994 else if (kn->kn_sfflags & NOTE_LOWAT)
2995 return (kn->kn_data >= kn->kn_sdata);
2996 else
2997 return (kn->kn_data >= so->so_snd.sb_lowat);
2998 }
3000 /*ARGSUSED*/
3001 static int
3002 filt_solisten(struct knote *kn, long hint)
3003 {
3004 struct socket *so = kn->kn_fp->f_data;
3006 kn->kn_data = so->so_qlen;
3007 return (! TAILQ_EMPTY(&so->so_comp));
3008 }
3010 int
3011 socheckuid(struct socket *so, uid_t uid)
3012 {
3014 if (so == NULL)
3015 return (EPERM);
3016 if (so->so_cred->cr_uid != uid)
3017 return (EPERM);
3018 return (0);
3019 }
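/*
 * Editorial example (not part of the original file): socheckuid() is a
 * convenience for credential checks, e.g. refusing an operation on someone
 * else's socket; "td" is a hypothetical thread pointer of the caller.
 */
#if 0
	if (socheckuid(so, td->td_ucred->cr_uid) != 0)
		return (EPERM);
#endif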
3021 static int
3022 sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
3023 {
3024 int error;
3025 int val;
3027 val = somaxconn;
3028 error = sysctl_handle_int(oidp, &val, 0, req);
3029 if (error || !req->newptr )
3030 return (error);
3032 if (val < 1 || val > USHRT_MAX)
3033 return (EINVAL);
3035 somaxconn = val;
3036 return (0);
3037 }
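/*
 * Editorial note (not part of the original file): this handler backs the
 * kern.ipc.somaxconn sysctl, so an administrator can raise the global
 * accept-queue ceiling at runtime, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 *
 * The bounds check above keeps the value within [1, USHRT_MAX].
 */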
3039 /*
3040 * These functions are used by protocols to notify the socket layer (and its
3041 * consumers) of state changes in the sockets driven by protocol-side events.
3042 */
3044 /*
3045 * Procedures to manipulate state flags of socket and do appropriate wakeups.
3047 * Normal sequence from the active (originating) side is that
3048 * soisconnecting() is called during processing of connect() call, resulting
3049 * in an eventual call to soisconnected() if/when the connection is
3050 * established. When the connection is torn down soisdisconnecting() is
3051 * called during processing of disconnect() call, and soisdisconnected() is
3052 * called when the connection to the peer is totally severed. The semantics
3053 * of these routines are such that connectionless protocols can call
3054 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3055 * calls when setting up a ``connection'' takes no time.
3057 * From the passive side, a socket is created with two queues of sockets:
3058 * so_incomp for connections in progress and so_comp for connections already
3059 * made and awaiting user acceptance. As a protocol is preparing incoming
3060 * connections, it creates a socket structure queued on so_incomp by calling
3061 * sonewconn(). When the connection is established, soisconnected() is
3062 * called, and transfers the socket structure to so_comp, making it available
3063 * to accept().
3065 * If a socket is closed with sockets on either so_incomp or so_comp, these
3066 * sockets are dropped.
3068 * If higher-level protocols are implemented in the kernel, the wakeups done
3069 * here will sometimes cause software-interrupt process scheduling.
3070 */
3071 void
3072 soisconnecting(struct socket *so)
3073 {
3075 SOCK_LOCK(so);
3076 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3077 so->so_state |= SS_ISCONNECTING;
3078 SOCK_UNLOCK(so);
3079 }
3081 void
3082 soisconnected(struct socket *so)
3083 {
3084 struct socket *head;
3086 ACCEPT_LOCK();
3087 SOCK_LOCK(so);
3088 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3089 so->so_state |= SS_ISCONNECTED;
3090 head = so->so_head;
3091 if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
3092 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3093 SOCK_UNLOCK(so);
3094 TAILQ_REMOVE(&head->so_incomp, so, so_list);
3095 head->so_incqlen--;
3096 so->so_qstate &= ~SQ_INCOMP;
3097 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
3098 head->so_qlen++;
3099 so->so_qstate |= SQ_COMP;
3100 ACCEPT_UNLOCK();
3101 sorwakeup(head);
3102 wakeup_one(&head->so_timeo);
3103 } else {
3104 ACCEPT_UNLOCK();
3105 so->so_upcall =
3106 head->so_accf->so_accept_filter->accf_callback;
3107 so->so_upcallarg = head->so_accf->so_accept_filter_arg;
3108 so->so_rcv.sb_flags |= SB_UPCALL;
3109 so->so_options &= ~SO_ACCEPTFILTER;
3110 SOCK_UNLOCK(so);
3111 so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
3112 }
3113 return;
3114 }
3115 SOCK_UNLOCK(so);
3116 ACCEPT_UNLOCK();
3117 wakeup(&so->so_timeo);
3118 sorwakeup(so);
3119 sowwakeup(so);
3120 }
3122 void
3123 soisdisconnecting(struct socket *so)
3124 {
3126 /*
3127 * Note: This code assumes that SOCK_LOCK(so) and
3128 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3129 */
3130 SOCKBUF_LOCK(&so->so_rcv);
3131 so->so_state &= ~SS_ISCONNECTING;
3132 so->so_state |= SS_ISDISCONNECTING;
3133 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3134 sorwakeup_locked(so);
3135 SOCKBUF_LOCK(&so->so_snd);
3136 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3137 sowwakeup_locked(so);
3138 wakeup(&so->so_timeo);
3139 }
3141 void
3142 soisdisconnected(struct socket *so)
3143 {
3145 /*
3146 * Note: This code assumes that SOCK_LOCK(so) and
3147 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3148 */
3149 SOCKBUF_LOCK(&so->so_rcv);
3150 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3151 so->so_state |= SS_ISDISCONNECTED;
3152 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3153 sorwakeup_locked(so);
3154 SOCKBUF_LOCK(&so->so_snd);
3155 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3156 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
3157 sowwakeup_locked(so);
3158 wakeup(&so->so_timeo);
3159 }
3161 /*
3162 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3163 */
3164 struct sockaddr *
3165 sodupsockaddr(const struct sockaddr *sa, int mflags)
3166 {
3167 struct sockaddr *sa2;
3169 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3170 if (sa2)
3171 bcopy(sa, sa2, sa->sa_len);
3172 return sa2;
3173 }
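/*
 * Editorial example (not part of the original file): callers own the copy
 * and must release it with the matching malloc type once done.
 */
#if 0
	struct sockaddr *sa2;

	sa2 = sodupsockaddr(sa, M_WAITOK);
	/* ... use sa2 ... */
	free(sa2, M_SONAME);
#endif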
3175 /*
3176 * Create an external-format (``xsocket'') structure using the information in
3177 * the kernel-format socket structure pointed to by so. This is done to
3178 * reduce the spew of irrelevant information over this interface, to isolate
3179 * user code from changes in the kernel structure, and potentially to provide
3180 * information-hiding if we decide that some of this information should be
3181 * hidden from users.
3182 */
3183 void
3184 sotoxsocket(struct socket *so, struct xsocket *xso)
3185 {
3187 xso->xso_len = sizeof *xso;
3188 xso->xso_so = so;
3189 xso->so_type = so->so_type;
3190 xso->so_options = so->so_options;
3191 xso->so_linger = so->so_linger;
3192 xso->so_state = so->so_state;
3193 xso->so_pcb = so->so_pcb;
3194 xso->xso_protocol = so->so_proto->pr_protocol;
3195 xso->xso_family = so->so_proto->pr_domain->dom_family;
3196 xso->so_qlen = so->so_qlen;
3197 xso->so_incqlen = so->so_incqlen;
3198 xso->so_qlimit = so->so_qlimit;
3199 xso->so_timeo = so->so_timeo;
3200 xso->so_error = so->so_error;
3201 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3202 xso->so_oobmark = so->so_oobmark;
3203 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3204 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
3205 xso->so_uid = so->so_cred->cr_uid;
3206 }
3209 /*
3210 * Socket accessor functions to provide external consumers with
3211 * a safe interface to socket state
3212 */
3215 void
3216 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg)
3217 {
3219 TAILQ_FOREACH(so, &so->so_comp, so_list)
3220 func(so, arg);
3221 }
3223 struct sockbuf *
3224 so_sockbuf_rcv(struct socket *so)
3225 {
3227 return (&so->so_rcv);
3228 }
3230 struct sockbuf *
3231 so_sockbuf_snd(struct socket *so)
3232 {
3234 return (&so->so_snd);
3235 }
3237 int
3238 so_state_get(const struct socket *so)
3239 {
3241 return (so->so_state);
3242 }
3244 void
3245 so_state_set(struct socket *so, int val)
3246 {
3248 so->so_state = val;
3249 }
3251 int
3252 so_options_get(const struct socket *so)
3253 {
3255 return (so->so_options);
3256 }
3258 void
3259 so_options_set(struct socket *so, int val)
3260 {
3262 so->so_options = val;
3263 }
3265 int
3266 so_error_get(const struct socket *so)
3267 {
3269 return (so->so_error);
3270 }
3272 void
3273 so_error_set(struct socket *so, int val)
3274 {
3276 so->so_error = val;
3277 }
3279 int
3280 so_linger_get(const struct socket *so)
3281 {
3283 return (so->so_linger);
3284 }
3286 void
3287 so_linger_set(struct socket *so, int val)
3288 {
3290 so->so_linger = val;
3291 }
3293 struct protosw *
3294 so_protosw_get(const struct socket *so)
3295 {
3297 return (so->so_proto);
3298 }
3300 void
3301 so_protosw_set(struct socket *so, struct protosw *val)
3302 {
3304 so->so_proto = val;
3305 }
3307 void
3308 so_sorwakeup(struct socket *so)
3309 {
3311 sorwakeup(so);
3312 }
3314 void
3315 so_sowwakeup(struct socket *so)
3316 {
3318 sowwakeup(so);
3319 }
3321 void
3322 so_sorwakeup_locked(struct socket *so)
3323 {
3325 sorwakeup_locked(so);
3326 }
3328 void
3329 so_sowwakeup_locked(struct socket *so)
3330 {
3332 sowwakeup_locked(so);
3333 }
3335 void
3336 so_lock(struct socket *so)
3337 {
3338 SOCK_LOCK(so);
3339 }
3341 void
3342 so_unlock(struct socket *so)
3343 {
3344 SOCK_UNLOCK(so);
3345 }
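/*
 * Editorial example (not part of the original file): a protocol module built
 * against these accessors rather than the socket internals might flag an
 * error like this:
 */
#if 0
	so_lock(so);
	so_error_set(so, ECONNRESET);
	so_unlock(so);
	so_sorwakeup(so);
#endif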