/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 */
#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>		/* for maxfiles */
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/msgport2.h>
#include <sys/socketvar2.h>

#include <net/netisr2.h>
#ifndef KTR_SOWAKEUP
#define KTR_SOWAKEUP	KTR_ALL
#endif
KTR_INFO_MASTER(sowakeup);
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_start, 0, "newconn sorwakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_end, 1, "newconn sorwakeup end");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupstart, 2, "newconn wakeup start");
KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupend, 3, "newconn wakeup end");
#define logsowakeup(name)	KTR_LOG(sowakeup_ ## name)
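
/*
 * Note: these KTR probes bracket the listen-socket wakeups performed in
 * sonewconn_faddr() below, so a KTR trace can separate time spent in
 * sorwakeup() from time spent waking accept() sleepers on so_timeo.
 */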
int	maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers
 */
u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
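
/*
 * Note on sb_max_adj: socket buffer accounting is in payload bytes, but
 * in the worst case each MCLBYTES of payload sits in a cluster mbuf that
 * costs MSIZE + MCLBYTES of memory.  Scaling sb_max by
 * MCLBYTES / (MSIZE + MCLBYTES) yields the payload limit that keeps
 * actual memory consumption at or below sb_max.
 */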
static u_long sb_efficiency = 8;	/* parameter for sbreserve() */

SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * soacceptreuse allows a bind() to a local port (e.g. for listen()
 * purposes) to ignore any connections still being accepted from a
 * prior listen() on that port.
 */
static int soacceptreuse = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_reuse, CTLFLAG_RW,
	   &soacceptreuse, 0, "Allow quick reuse of local port");
/************************************************************************
 *			signalsockbuf procedures			*
 ************************************************************************/

/*
 * Wait for data to arrive at/drain from a socket buffer.
 *
 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
 *	 WAIT/WAKEUP only works for one client at a time.
 *
 * NOTE: Caller always retries whatever operation it was waiting on.
 */
int
ssb_wait(struct signalsockbuf *ssb)
{
	uint32_t flags;
	int pflags;
	int error;

	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

	for (;;) {
		flags = ssb->ssb_flags;
		cpu_ccfence();

		/*
		 * WAKEUP and WAIT interlock each other.  We can catch the
		 * race by checking to see if WAKEUP has already been set,
		 * and only setting WAIT if WAKEUP is clear.
		 */
		if (flags & SSB_WAKEUP) {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags & ~SSB_WAKEUP)) {
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * Only set WAIT if WAKEUP is clear.
		 */
		tsleep_interlock(&ssb->ssb_cc, pflags);
		if (atomic_cmpset_int(&ssb->ssb_flags, flags,
				      flags | SSB_WAIT)) {
			error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
				       "sbwait", ssb->ssb_timeo);
			break;
		}
	}
	return (error);
}
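
/*
 * Illustrative caller pattern for ssb_wait() (a sketch only, not code
 * used in this file): the waited-on condition must be re-tested after
 * every return, since a successful return merely consumes a wakeup.
 *
 *	while (condition_not_yet_true(ssb)) {
 *		error = ssb_wait(ssb);
 *		if (error)	// e.g. EINTR via PCATCH, or a timeout
 *			break;
 *	}
 */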
/*
 * Lock a sockbuf already known to be locked;
 * return any error returned from sleep (EINTR).
 */
int
_ssb_lock(struct signalsockbuf *ssb)
{
	uint32_t flags;
	int pflags;
	int error;

	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

	for (;;) {
		flags = ssb->ssb_flags;
		cpu_ccfence();
		if (flags & SSB_LOCK) {
			tsleep_interlock(&ssb->ssb_flags, pflags);
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags | SSB_WANT)) {
				error = tsleep(&ssb->ssb_flags,
					       pflags | PINTERLOCKED,
					       "sblock", 0);
				if (error)
					break;
			}
		} else {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags | SSB_LOCK)) {
				lwkt_gettoken(&ssb->ssb_token);
				error = 0;
				break;
			}
		}
	}
	return (error);
}
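
/*
 * Design note: the leading underscore marks _ssb_lock() as the contested
 * path, entered once the (presumed inline) fast path has failed to take
 * SSB_LOCK uncontested.  SSB_WANT plays the role the classic BSD SB_WANT
 * flag played for sb_lock(): it records that a thread is sleeping on
 * ssb_flags and must be woken when the lock is released.
 */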
/*
 * This does for sockbufs what sotoxsocket() (below) does for sockets.
 * Note that the xsockbuf structure, since it is always embedded in a
 * socket, does not include a self pointer nor a length.  We make this
 * entry point public in case some other mechanism needs it.
 */
void
ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
{
	xsb->sb_cc = ssb->ssb_cc;
	xsb->sb_hiwat = ssb->ssb_hiwat;
	xsb->sb_mbcnt = ssb->ssb_mbcnt;
	xsb->sb_mbmax = ssb->ssb_mbmax;
	xsb->sb_lowat = ssb->ssb_lowat;
	xsb->sb_flags = ssb->ssb_flags;
	xsb->sb_timeo = ssb->ssb_timeo;
}
/************************************************************************
 *	Procedures which manipulate socket state flags, wakeups, etc.	*
 ************************************************************************
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.
 *
 * The semantics of these routines are such that connectionless protocols
 * can call soisconnected() and soisdisconnected() only, bypassing the
 * in-progress calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, and transfers the socket structure to so_comp,
 * making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */
void
soisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
	struct socket *head;

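	/*
	 * Pick up the pool token on the listen (head) socket.  so_head
	 * can change while we block acquiring the token, so re-check it
	 * and retry until it is stable under the token.
	 */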
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
	sosetstate(so, SS_ISCONNECTED);
	if (head && (so->so_state & SS_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			so->so_upcall = head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
			so->so_options &= ~SO_ACCEPTFILTER;
			so->so_upcall(so, so->so_upcallarg, 0);
			lwkt_relpooltoken(head);
			return;
		}

		/*
		 * Listen sockets are not per-cpu.
		 */
		KKASSERT((so->so_state & (SS_COMP | SS_INCOMP)) == SS_INCOMP);
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		head->so_qlen++;
		sosetstate(so, SS_COMP);
		soclrstate(so, SS_INCOMP);

		/*
		 * XXX head may be on a different protocol thread.
		 *     sorwakeup()->sowakeup() is hacked atm.
		 */
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
	if (head)
		lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING);
	sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
	wakeup((caddr_t)&so->so_timeo);
	sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
		       SS_CANTRCVMORE | SS_CANTSENDMORE);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
	soisconnected(so);
}

/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
	so->so_port = port;
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
    const struct sockaddr *faddr, boolean_t keep_ref)
{
	struct socket *so;
	struct socket *sp;
	struct pru_attach_info ai;

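	/*
	 * Refuse the new connection if the completed-connection queue
	 * already holds more than 1.5x the backlog requested by listen();
	 * the 3/2 fudge factor is historical BSD behavior.
	 */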
	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return (NULL);
	so = soalloc(1, head->so_proto);
	if (so == NULL)
		return (NULL);

	/*
	 * Set the port prior to attaching the inpcb to the current
	 * cpu's protocol thread (which should be the current thread
	 * but might not be in all cases).  This serializes any pcb ops
	 * which occur to our cpu allowing us to complete the attachment
	 * without racing anything.
	 */
	if (head->so_proto->pr_flags & PR_SYNC_PORT)
		sosetport(so, &netisr_sync_port);
	else
		sosetport(so, netisr_cpuport(mycpuid));
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;

	/*
	 * NOTE: Clearing NOFDREF implies referencing the so with
	 *	 soreference().
	 */
	so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
	so->so_cred = crhold(head->so_cred);
	ai.sb_rlimit = NULL;
	ai.p_ucred = NULL;
	ai.fd_rdir = NULL;		/* jail code cruft XXX JH */

	/*
	 * Reserve space and call pru_attach.  We can direct-call the
	 * function since we're already in the protocol thread.
	 */
	if (soreserve(so, head->so_snd.ssb_hiwat,
		      head->so_rcv.ssb_hiwat, NULL) ||
	    so_pru_attach_direct(so, 0, &ai)) {
		so->so_head = NULL;
		soclrstate(so, SS_ASSERTINPROG);
		sofree(so);		/* remove implied pcb ref */
		return (NULL);
	}
	KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
		  so->so_refs == 2) ||	/* attach + our base ref */
		 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
		  so->so_refs == 3));	/* + async rcvd ref */
	if (keep_ref) {
		/*
		 * Keep the reference; caller will free it.
		 */
	} else {
		sofree(so);
	}
	KKASSERT(so->so_port != NULL);
	so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
	so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
	so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
	so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

	if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
		so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
		so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
		so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

	if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
		so->so_snd.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;

	/*
	 * Save the faddr, if the information is provided and
	 * the protocol can perform the saving operation.
	 */
	if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
		so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);

	lwkt_getpooltoken(head);
	if (connstatus) {
		KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		head->so_qlen++;

		/*
		 * Set connstatus within head token, so that the accepted
		 * socket will have connstatus (SS_ISCONNECTED) set.
		 */
		if (soacceptreuse)
			connstatus |= SS_ACCEPTMECH;
		sosetstate(so, SS_COMP | connstatus);
	} else {
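		/*
		 * If the incomplete queue is over its limit, drop the
		 * oldest embryonic connection to make room for the new
		 * one (classic BSD SYN-queue pruning).
		 */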
		if (head->so_incqlen > head->so_qlimit) {
			sp = TAILQ_FIRST(&head->so_incomp);
			KKASSERT((sp->so_state & (SS_INCOMP | SS_COMP)) ==
				 SS_INCOMP);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == 0);
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		head->so_incqlen++;
		sosetstate(so, SS_INCOMP | SS_ACCEPTMECH);
	}

	/*
	 * Clear SS_ASSERTINPROG within head token, so that it will not
	 * race against accept-close or abort for "synchronous" sockets,
	 * e.g. unix sockets, on other CPUs.
	 */
	soclrstate(so, SS_ASSERTINPROG);
	lwkt_relpooltoken(head);

	if (connstatus) {
		/*
		 * XXX head may be on a different protocol thread.
		 *     sorwakeup()->sowakeup() is hacked atm.
		 */
		logsowakeup(nconn_start);
		sorwakeup(head);
		logsowakeup(nconn_end);

		logsowakeup(nconn_wakeupstart);
		wakeup((caddr_t)&head->so_timeo);
		logsowakeup(nconn_wakeupend);
	}
	return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus)
{
	return sonewconn_faddr(head, connstatus, NULL, FALSE /* don't ref */);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */
void
socantsendmore(struct socket *so)
{
	sosetstate(so, SS_CANTSENDMORE);
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
	sosetstate(so, SS_CANTRCVMORE);
	sorwakeup(so);
}

/*
 * soroverflow(): indicates that incoming data was dropped because the
 * receive buffer overflowed.
 */
void
soroverflow(struct socket *so)
{
	if (so->so_options & SO_RERROR) {
		so->so_rerror = ENOBUFS;
		sorwakeup(so);
	}
}

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * For users waiting on send/recv try to avoid unnecessary context switch
 * thrashing.  Particularly for senders of large buffers (needs to be
 * extended to sel and aio? XXX)
 *
 * WARNING! Can be called on a foreign socket from the wrong protocol
 *	    thread, e.g. it is called on the 'head' listen socket when
 *	    a new connection comes in.
 */
void
sowakeup(struct socket *so, struct signalsockbuf *ssb)
{
	uint32_t flags;

	/*
	 * Atomically check the flags.  When no special features are being
	 * used, WAIT is clear, and WAKEUP is already set, we can simply
	 * return.  The upcoming synchronous waiter will not block.
	 */
	flags = atomic_fetchadd_int(&ssb->ssb_flags, 0);
	if ((flags & SSB_NOTIFY_MASK) == 0) {
		if (flags & SSB_WAKEUP)
			return;
	}

	/*
	 * Check conditions, set the WAKEUP flag, and clear and signal if
	 * the WAIT flag is found to be set.  This interlocks against the
	 * client side.
	 */
	for (;;) {
		long space;

		flags = ssb->ssb_flags;
		cpu_ccfence();
		if (ssb->ssb_flags & SSB_PREALLOC)
			space = ssb_space_prealloc(ssb);
		else
			space = ssb_space(ssb);

		if ((ssb == &so->so_snd && space >= ssb->ssb_lowat) ||
		    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
		    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
		    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
		) {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					(flags | SSB_WAKEUP) & ~SSB_WAIT)) {
				if (flags & SSB_WAIT)
					wakeup(&ssb->ssb_cc);
				break;
			}
		} else {
			break;
		}
	}

	/*
	 * Misc other events
	 */
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGIO, 0);
	if (ssb->ssb_flags & SSB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, M_NOWAIT);
	KNOTE(&ssb->ssb_kq.ki_note, 0);

	/*
	 * This is a bit of a hack.  Multiple threads can wind up scanning
	 * ssb_mlist concurrently due to the fact that this function can be
	 * called on a foreign socket, so we can't afford to block here.
	 *
	 * We need the pool token for (so) (likely the listen socket if
	 * SSB_MEVENT is set) because the predicate function may have
	 * to access the accept queue.
	 */
	if (ssb->ssb_flags & SSB_MEVENT) {
		struct netmsg_so_notify *msg, *nmsg;

		lwkt_getpooltoken(so);
		TAILQ_FOREACH_MUTABLE(msg, &ssb->ssb_mlist, nm_list, nmsg) {
			if (msg->nm_predicate(msg)) {
				TAILQ_REMOVE(&ssb->ssb_mlist, msg, nm_list);
				lwkt_replymsg(&msg->base.lmsg,
					      msg->base.lmsg.ms_error);
			}
		}
		if (TAILQ_EMPTY(&ssb->ssb_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}

/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() statements
 * and notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
	if (so->so_snd.ssb_lowat == 0)
		atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
	if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
		goto bad;
	if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
		goto bad2;

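	/*
	 * Default low water marks: any data at all wakes up a reader,
	 * while a writer is not woken until at least one cluster's
	 * worth (MCLBYTES) of space opens up in the send buffer.
	 */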
	if (so->so_rcv.ssb_lowat == 0)
		so->so_rcv.ssb_lowat = 1;
	if (so->so_snd.ssb_lowat == 0)
		so->so_snd.ssb_lowat = MCLBYTES;
	if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
		so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
	return (0);
bad2:
	ssb_release(&so->so_snd, so);
bad:
	return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long old_sb_max = sb_max;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return (error);
	error = SYSCTL_IN(req, arg1, sizeof(int));
	if (error)
		return (error);
	if (sb_max < MSIZE + MCLBYTES) {
		sb_max = old_sb_max;
		return (EINVAL);
	}
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a signalsockbuf.
 *
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 *
 * sb_max only applies to user-sockets (where rl != NULL).  It does
 * not apply to kernel sockets or kernel-controlled sockets.  Note
 * that NFS overrides the sockbuf limits created when nfsd creates
 * a socket.
 */
int
ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
	    struct rlimit *rl)
{
	/*
	 * rl will only be NULL when we're in an interrupt (eg, in tcp_input)
	 * or when called from netgraph (ie, ngd_attach)
	 */
	if (rl && cc > sb_max_adj)
		cc = sb_max_adj;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
		       rl ? rl->rlim_cur : RLIM_INFINITY)) {
		return (0);
	}
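	/*
	 * mbmax limits raw mbuf memory (mbcnt) rather than payload bytes;
	 * scaling the payload limit by sb_efficiency (default 8) keeps
	 * sparsely filled mbufs from capping the buffer before the
	 * payload limit (hiwat) is reached.
	 */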
	if (rl)
		ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
	else
		ssb->ssb_mbmax = cc * sb_efficiency;

	/*
	 * AUTOLOWAT is set on send buffers and prevents large writes
	 * from generating a huge number of context switches.
	 */
	if (ssb->ssb_flags & SSB_AUTOLOWAT) {
		ssb->ssb_lowat = ssb->ssb_hiwat / 4;
		if (ssb->ssb_lowat < MCLBYTES)
			ssb->ssb_lowat = MCLBYTES;
	}
	if (ssb->ssb_lowat > ssb->ssb_hiwat)
		ssb->ssb_lowat = ssb->ssb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
ssb_release(struct signalsockbuf *ssb, struct socket *so)
{
	sbflush(&ssb->sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
			RLIM_INFINITY);
	ssb->ssb_mbmax = 0;
}

/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
void
pr_generic_notsupp(netmsg_t msg)
{
	lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
		   struct mbuf *top, struct mbuf *control, int flags,
		   struct thread *td)
{
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
		      struct uio *uio, struct sockbuf *sio,
		      struct mbuf **controlp, int *flagsp)
{
	return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
void
pru_sense_null(netmsg_t msg)
{
	msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
	lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical thread.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
	struct sockaddr *sa2;

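	/*
	 * M_INTWAIT allows the allocation to block while remaining legal
	 * in interrupt-like contexts, and is expected not to fail; this
	 * is what lets callers assume success.
	 */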
	sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
	bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
	ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}

/*
 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
 *
 * NOTE! sb_max only applies to user-created socket buffers.
 */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
	   &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
	   &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
	   &sb_efficiency, 0,
	   "Socket buffer limit scaler");

/*
 * Initialize maxsockets
 */
static void
init_maxsockets(void *ignored)
{
	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	init_maxsockets, NULL);