accept: Save foreign address earlier, if protocol supports it
[dragonfly.git] / sys / kern / uipc_socket2.c
1 /*
2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 1982, 1986, 1988, 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93
35 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
36 * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.33 2008/09/02 16:17:52 dillon Exp $
39 #include "opt_param.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/domain.h>
43 #include <sys/file.h> /* for maxfiles */
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/protosw.h>
49 #include <sys/resourcevar.h>
50 #include <sys/stat.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/socketops.h>
54 #include <sys/signalvar.h>
55 #include <sys/sysctl.h>
56 #include <sys/event.h>
58 #include <sys/thread2.h>
59 #include <sys/msgport2.h>
60 #include <sys/socketvar2.h>
62 int maxsockets;
65 * Primitive routines for operating on sockets and socket buffers
68 u_long sb_max = SB_MAX;
69 u_long sb_max_adj =
70 SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
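/*
 * sb_max_adj discounts per-cluster mbuf header overhead: of every
 * (MSIZE + MCLBYTES) bytes charged against sb_max, only MCLBYTES can
 * carry payload.  E.g. with MSIZE 256 and MCLBYTES 2048, roughly 89%
 * of sb_max remains usable for data.
 */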
72 static u_long sb_efficiency = 8; /* parameter for sbreserve() */
74 /************************************************************************
75 * signalsockbuf procedures *
76 ************************************************************************/
79 * Wait for data to arrive at/drain from a socket buffer.
81 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
82 * WAIT/WAKEUP only works for one client at a time.
84 * NOTE: Caller always retries whatever operation it was waiting on.
86 int
87 ssb_wait(struct signalsockbuf *ssb)
89 uint32_t flags;
90 int pflags;
91 int error;
93 pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;
95 for (;;) {
96 flags = ssb->ssb_flags;
97 cpu_ccfence();
100 * WAKEUP and WAIT interlock each other. We can catch the
101 * race by checking to see if WAKEUP has already been set,
102 * and only setting WAIT if WAKEUP is clear.
104 if (flags & SSB_WAKEUP) {
105 if (atomic_cmpset_int(&ssb->ssb_flags, flags,
106 flags & ~SSB_WAKEUP)) {
107 error = 0;
108 break;
110 continue;
114 * Only set WAIT if WAKEUP is clear.
116 tsleep_interlock(&ssb->ssb_cc, pflags);
117 if (atomic_cmpset_int(&ssb->ssb_flags, flags,
118 flags | SSB_WAIT)) {
119 error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
120 "sbwait", ssb->ssb_timeo);
121 break;
124 return (error);
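/*
 * Typical consumer pattern, loosely modeled on soreceive() and
 * simplified: the client-side lock is held across the test/wait loop
 * and the condition is always re-tested after ssb_wait() returns,
 * since a successful return only means "something changed":
 *
 *	if ((error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
 *		return (error);
 *	while (so->so_rcv.ssb_cc == 0) {	(condition simplified)
 *		if ((error = ssb_wait(&so->so_rcv)) != 0)
 *			break;
 *	}
 *	ssb_unlock(&so->so_rcv);
 */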
128 * Lock a sockbuf already known to be locked;
129 * return any error returned from sleep (EINTR).
132 _ssb_lock(struct signalsockbuf *ssb)
134 uint32_t flags;
135 int pflags;
136 int error;
138 pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;
140 for (;;) {
141 flags = ssb->ssb_flags;
142 cpu_ccfence();
143 if (flags & SSB_LOCK) {
144 tsleep_interlock(&ssb->ssb_flags, pflags);
145 if (atomic_cmpset_int(&ssb->ssb_flags, flags,
146 flags | SSB_WANT)) {
147 error = tsleep(&ssb->ssb_flags,
148 pflags | PINTERLOCKED,
149 "sblock", 0);
150 if (error)
151 break;
153 } else {
154 if (atomic_cmpset_int(&ssb->ssb_flags, flags,
155 flags | SSB_LOCK)) {
156 lwkt_gettoken(&ssb->ssb_token);
157 error = 0;
158 break;
162 return (error);
166 * This does for sockbufs what sotoxsocket() (below) does for sockets.
167 * Note that the xsockbuf structure,
167 * since it is always embedded in a socket, does not include a self
168 * pointer nor a length. We make this entry point public in case
169 * some other mechanism needs it.
171 void
172 ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
174 xsb->sb_cc = ssb->ssb_cc;
175 xsb->sb_hiwat = ssb->ssb_hiwat;
176 xsb->sb_mbcnt = ssb->ssb_mbcnt;
177 xsb->sb_mbmax = ssb->ssb_mbmax;
178 xsb->sb_lowat = ssb->ssb_lowat;
179 xsb->sb_flags = ssb->ssb_flags;
180 xsb->sb_timeo = ssb->ssb_timeo;
184 /************************************************************************
185 * Procedures which manipulate socket state flags, wakeups, etc. *
186 ************************************************************************
188 * Normal sequence from the active (originating) side is that
189 * soisconnecting() is called during processing of connect() call, resulting
190 * in an eventual call to soisconnected() if/when the connection is
191 * established. When the connection is torn down soisdisconnecting() is
192 * called during processing of disconnect() call, and soisdisconnected() is
193 * called when the connection to the peer is totally severed.
195 * The semantics of these routines are such that connectionless protocols
196 * can call soisconnected() and soisdisconnected() only, bypassing the
197 * in-progress calls when setting up a ``connection'' takes no time.
199 * From the passive side, a socket is created with two queues of sockets:
200 * so_incomp for connections in progress and so_comp for connections
201 * already made and awaiting user acceptance. As a protocol is preparing
202 * incoming connections, it creates a socket structure queued on so_incomp
203 * by calling sonewconn(). When the connection is established,
204 * soisconnected() is called, and transfers the socket structure to so_comp,
205 * making it available to accept().
207 * If a socket is closed with sockets on either so_incomp or so_comp, these
208 * sockets are dropped.
210 * If higher level protocols are implemented in the kernel, the wakeups
211 * done here will sometimes cause software-interrupt process scheduling.
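/*
 * In short, the passive-side path looks like:
 *
 *	incoming connection attempt -> sonewconn(): queued on so_incomp
 *	connection established      -> soisconnected(): moved to so_comp
 *	user calls accept()         -> dequeued from so_comp
 *
 * (When an accept filter is installed the move to so_comp is deferred;
 *  see the SO_ACCEPTFILTER handling in soisconnected() below.)
 */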
214 void
215 soisconnecting(struct socket *so)
217 soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
218 sosetstate(so, SS_ISCONNECTING);
221 void
222 soisconnected(struct socket *so)
224 struct socket *head;
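	/*
	 * Acquire the listen socket's pool token.  so_head can change
	 * while we block on the token, so re-check it after acquisition
	 * and retry until both agree (or the socket turns out to have
	 * no head at all).
	 */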
226 while ((head = so->so_head) != NULL) {
227 lwkt_getpooltoken(head);
228 if (so->so_head == head)
229 break;
230 lwkt_relpooltoken(head);
233 soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
234 sosetstate(so, SS_ISCONNECTED);
235 if (head && (so->so_state & SS_INCOMP)) {
236 if ((so->so_options & SO_ACCEPTFILTER) != 0) {
237 so->so_upcall = head->so_accf->so_accept_filter->accf_callback;
238 so->so_upcallarg = head->so_accf->so_accept_filter_arg;
239 atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
240 so->so_options &= ~SO_ACCEPTFILTER;
241 so->so_upcall(so, so->so_upcallarg, 0);
242 lwkt_relpooltoken(head);
243 return;
247 * Listen sockets are not per-cpu.
249 TAILQ_REMOVE(&head->so_incomp, so, so_list);
250 head->so_incqlen--;
251 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
252 head->so_qlen++;
253 sosetstate(so, SS_COMP);
254 soclrstate(so, SS_INCOMP);
257 * XXX head may be on a different protocol thread.
258 * sorwakeup()->sowakeup() is hacked atm.
260 sorwakeup(head);
261 wakeup_one(&head->so_timeo);
262 } else {
263 wakeup(&so->so_timeo);
264 sorwakeup(so);
265 sowwakeup(so);
267 if (head)
268 lwkt_relpooltoken(head);
271 void
272 soisdisconnecting(struct socket *so)
274 soclrstate(so, SS_ISCONNECTING);
275 sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
276 wakeup((caddr_t)&so->so_timeo);
277 sowwakeup(so);
278 sorwakeup(so);
281 void
282 soisdisconnected(struct socket *so)
284 soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
285 sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
286 wakeup((caddr_t)&so->so_timeo);
287 sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
288 sowwakeup(so);
289 sorwakeup(so);
292 void
293 soisreconnecting(struct socket *so)
295 soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
296 SS_CANTRCVMORE | SS_CANTSENDMORE);
297 sosetstate(so, SS_ISCONNECTING);
300 void
301 soisreconnected(struct socket *so)
303 soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
304 soisconnected(so);
308 * Set or change the message port a socket receives commands on.
310 * XXX
312 void
313 sosetport(struct socket *so, lwkt_port_t port)
315 so->so_port = port;
319 * When an attempt at a new connection is noted on a socket
320 * which accepts connections, sonewconn is called. If the
321 * connection is possible (subject to space constraints, etc.)
322 * then we allocate a new structure, properly linked into the
323 * data structure of the original socket, and return this.
324 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
326 * The new socket is returned with one ref and so_pcb assigned.
327 * The reference is implied by so_pcb.
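/*
 * sonewconn_faddr() additionally lets the caller pass the foreign
 * address when it is already known at creation time (e.g. TCP creating
 * the socket from its syn cache).  The address is handed to the
 * protocol's pru_savefaddr hook, if one exists, so that a later
 * accept() can return the peer name without another trip into the
 * protocol.  sonewconn() below is the faddr-less wrapper.
 */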
329 struct socket *
330 sonewconn_faddr(struct socket *head, int connstatus,
331 const struct sockaddr *faddr)
333 struct socket *so;
334 struct socket *sp;
335 struct pru_attach_info ai;
337 if (head->so_qlen > 3 * head->so_qlimit / 2)
338 return (NULL);
339 so = soalloc(1);
340 if (so == NULL)
341 return (NULL);
344 * Set the port prior to attaching the inpcb to the current
345 * cpu's protocol thread (which should be the current thread
346 * but might not be in all cases). This serializes any pcb ops
347 * which occur to our cpu allowing us to complete the attachment
348 * without racing anything.
350 sosetport(so, cpu_portfn(mycpu->gd_cpuid));
351 if ((head->so_options & SO_ACCEPTFILTER) != 0)
352 connstatus = 0;
353 so->so_head = head;
354 so->so_type = head->so_type;
355 so->so_options = head->so_options &~ SO_ACCEPTCONN;
356 so->so_linger = head->so_linger;
359 * NOTE: Clearing NOFDREF implies referencing the so with
360 * soreference().
362 so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
363 so->so_proto = head->so_proto;
364 so->so_cred = crhold(head->so_cred);
365 ai.sb_rlimit = NULL;
366 ai.p_ucred = NULL;
367 ai.fd_rdir = NULL; /* jail code cruft XXX JH */
370 * Reserve space and call pru_attach. We can direct-call the
371 * function since we're already in the protocol thread.
373 if (soreserve(so, head->so_snd.ssb_hiwat,
374 head->so_rcv.ssb_hiwat, NULL) ||
375 so_pru_attach_direct(so, 0, &ai)) {
376 so->so_head = NULL;
377 soclrstate(so, SS_ASSERTINPROG);
378 sofree(so); /* remove implied pcb ref */
379 return (NULL);
381 KKASSERT(so->so_refs == 2); /* attach + our base ref */
382 sofree(so);
383 KKASSERT(so->so_port != NULL);
384 so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
385 so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
386 so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
387 so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;
389 if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
390 so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
391 else
392 so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;
394 if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
395 so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
396 else
397 so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;
399 if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
400 so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
401 else
402 so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;
404 if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
405 so->so_snd.ssb_flags |= SSB_AUTOSIZE;
406 else
407 so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;
410 * Save the faddr, if the information is provided and
411 * the protocol can perform the saving operation.
413 if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
414 so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);
416 lwkt_getpooltoken(head);
417 if (connstatus) {
418 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
419 sosetstate(so, SS_COMP);
420 head->so_qlen++;
421 } else {
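		/*
		 * Embryonic connection: if the incomplete queue is already
		 * over its limit, abort the oldest pending connection to
		 * make room before queueing the new one.
		 */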
422 if (head->so_incqlen > head->so_qlimit) {
423 sp = TAILQ_FIRST(&head->so_incomp);
424 TAILQ_REMOVE(&head->so_incomp, sp, so_list);
425 head->so_incqlen--;
426 soclrstate(sp, SS_INCOMP);
427 sp->so_head = NULL;
428 soaborta(sp);
430 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
431 sosetstate(so, SS_INCOMP);
432 head->so_incqlen++;
434 lwkt_relpooltoken(head);
435 if (connstatus) {
437 * XXX head may be on a different protocol thread.
438 * sorwakeup()->sowakeup() is hacked atm.
440 sorwakeup(head);
441 wakeup((caddr_t)&head->so_timeo);
442 sosetstate(so, connstatus);
444 soclrstate(so, SS_ASSERTINPROG);
445 return (so);
448 struct socket *
449 sonewconn(struct socket *head, int connstatus)
451 return sonewconn_faddr(head, connstatus, NULL);
455 * Socantsendmore indicates that no more data will be sent on the
456 * socket; it would normally be applied to a socket when the user
457 * informs the system that no more data is to be sent, by the protocol
458 * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data
459 * will be received, and will normally be applied to the socket by a
460 * protocol when it detects that the peer will send no more data.
461 * Data queued for reading in the socket may yet be read.
463 void
464 socantsendmore(struct socket *so)
466 sosetstate(so, SS_CANTSENDMORE);
467 sowwakeup(so);
470 void
471 socantrcvmore(struct socket *so)
473 sosetstate(so, SS_CANTRCVMORE);
474 sorwakeup(so);
478 * Wakeup processes waiting on a socket buffer. Do asynchronous notification
479 * via SIGIO if the socket has the SS_ASYNC flag set.
481 * For users waiting on send/recv try to avoid unnecessary context switch
482 * thrashing. Particularly for senders of large buffers (needs to be
483 * extended to sel and aio? XXX)
485 * WARNING! Can be called on a foreign socket from the wrong protocol
486 * thread, e.g. it is called on the 'head' listen socket when
487 * a new connection comes in.
489 void
490 sowakeup(struct socket *so, struct signalsockbuf *ssb)
492 struct kqinfo *kqinfo = &ssb->ssb_kq;
493 uint32_t flags;
496 * Check conditions, set the WAKEUP flag, and clear and signal if
497 * the WAIT flag is found to be set. This interlocks against the
498 * client side.
500 for (;;) {
501 flags = ssb->ssb_flags;
502 cpu_ccfence();
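		/*
		 * Only wake anyone up if the buffer has crossed its
		 * low-water mark (writable space for the send buffer,
		 * queued data for the receive buffer) or can no longer
		 * move data at all (CANTSENDMORE / CANTRCVMORE).
		 */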
504 if ((ssb == &so->so_snd && ssb_space(ssb) >= ssb->ssb_lowat) ||
505 (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
506 (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
507 (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
509 if (atomic_cmpset_int(&ssb->ssb_flags, flags,
510 (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
511 if (flags & SSB_WAIT)
512 wakeup(&ssb->ssb_cc);
513 break;
515 } else {
516 break;
521 * Misc other events
523 if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
524 pgsigio(so->so_sigio, SIGIO, 0);
525 if (ssb->ssb_flags & SSB_UPCALL)
526 (*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT);
527 KNOTE(&kqinfo->ki_note, 0);
530 * This is a bit of a hack. Multiple threads can wind up scanning
531 * ki_mlist concurrently due to the fact that this function can be
532 * called on a foreign socket, so we can't afford to block here.
534 * We need the pool token for (so) (likely the listen socket if
535 * SSB_MEVENT is set) because the predicate function may have
536 * to access the accept queue.
538 if (ssb->ssb_flags & SSB_MEVENT) {
539 struct netmsg_so_notify *msg, *nmsg;
541 lwkt_getpooltoken(so);
542 TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
543 if (msg->nm_predicate(msg)) {
544 TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
545 lwkt_replymsg(&msg->base.lmsg,
546 msg->base.lmsg.ms_error);
549 if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
550 atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
551 lwkt_relpooltoken(so);
556 * Socket buffer (struct signalsockbuf) utility routines.
558 * Each socket contains two socket buffers: one for sending data and
559 * one for receiving data. Each buffer contains a queue of mbufs,
560 * information about the number of mbufs and amount of data in the
561 * queue, and other fields allowing kevent()/select()/poll() statements
562 * and notification on data availability to be implemented.
564 * Data stored in a socket buffer is maintained as a list of records.
565 * Each record is a list of mbufs chained together with the m_next
566 * field. Records are chained together with the m_nextpkt field. The upper
567 * level routine soreceive() expects the following conventions to be
568 * observed when placing information in the receive buffer:
570 * 1. If the protocol requires each message be preceded by the sender's
571 * name, then a record containing that name must be present before
572 * any associated data (mbuf's must be of type MT_SONAME).
573 * 2. If the protocol supports the exchange of ``access rights'' (really
574 * just additional data associated with the message), and there are
575 * ``rights'' to be received, then a record containing this data
576 * should be present (mbuf's must be of type MT_RIGHTS).
577 * 3. If a name or rights record exists, then it must be followed by
578 * a data record, perhaps of zero length.
580 * Before using a new socket structure it is first necessary to reserve
581 * buffer space to the socket, by calling sbreserve(). This should commit
582 * some of the available buffer space in the system buffer pool for the
583 * socket (currently, it does nothing but enforce limits). The space
584 * should be released by calling ssb_release() when the socket is destroyed.
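/*
 * E.g. a datagram delivered together with its sender's address
 * typically ends up in the receive buffer laid out as:
 *
 *	MT_SONAME mbuf --m_next--> MT_DATA mbuf(s)
 *	     |
 *	 m_nextpkt
 *	     |
 *	 next record (the next datagram), laid out the same way
 */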
587 soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
589 if (so->so_snd.ssb_lowat == 0)
590 atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
591 if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
592 goto bad;
593 if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
594 goto bad2;
595 if (so->so_rcv.ssb_lowat == 0)
596 so->so_rcv.ssb_lowat = 1;
597 if (so->so_snd.ssb_lowat == 0)
598 so->so_snd.ssb_lowat = MCLBYTES;
599 if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
600 so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
601 return (0);
602 bad2:
603 ssb_release(&so->so_snd, so);
604 bad:
605 return (ENOBUFS);
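/*
 * sysctl handler for kern.ipc.maxsockbuf: reject values too small to
 * hold even a single mbuf header plus cluster, and keep sb_max_adj in
 * sync with the accepted value.
 */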
608 static int
609 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
611 int error = 0;
612 u_long old_sb_max = sb_max;
614 error = SYSCTL_OUT(req, arg1, sizeof(int));
615 if (error || !req->newptr)
616 return (error);
617 error = SYSCTL_IN(req, arg1, sizeof(int));
618 if (error)
619 return (error);
620 if (sb_max < MSIZE + MCLBYTES) {
621 sb_max = old_sb_max;
622 return (EINVAL);
624 sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
625 return (0);
629 * Allot mbufs to a signalsockbuf.
631 * Attempt to scale mbmax so that mbcnt doesn't become limiting
632 * if buffering efficiency is near the normal case.
634 * sb_max only applies to user-sockets (where rl != NULL). It does
635 * not apply to kernel sockets or kernel-controlled sockets. Note
636 * that NFS overrides the sockbuf limits created when nfsd creates
637 * a socket.
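/*
 * E.g. with the default sb_efficiency of 8, reserving a 64KB user
 * socket buffer yields ssb_mbmax = min(64KB * 8, sb_max), so sloppily
 * packed mbuf chains normally run into the byte limit (ssb_hiwat)
 * before the mbuf-accounting limit.
 */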
640 ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
641 struct rlimit *rl)
644 * rl will only be NULL when we're in an interrupt (e.g. in tcp_input)
645 * or when called from netgraph (i.e. ngd_attach)
647 if (rl && cc > sb_max_adj)
648 cc = sb_max_adj;
649 if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
650 rl ? rl->rlim_cur : RLIM_INFINITY)) {
651 return (0);
653 if (rl)
654 ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
655 else
656 ssb->ssb_mbmax = cc * sb_efficiency;
659 * AUTOLOWAT is set on send buffers and prevents large writes
660 * from generating a huge number of context switches.
662 if (ssb->ssb_flags & SSB_AUTOLOWAT) {
663 ssb->ssb_lowat = ssb->ssb_hiwat / 2;
664 if (ssb->ssb_lowat < MCLBYTES)
665 ssb->ssb_lowat = MCLBYTES;
667 if (ssb->ssb_lowat > ssb->ssb_hiwat)
668 ssb->ssb_lowat = ssb->ssb_hiwat;
669 return (1);
673 * Free mbufs held by a socket, and reserved mbuf space.
675 void
676 ssb_release(struct signalsockbuf *ssb, struct socket *so)
678 sbflush(&ssb->sb);
679 (void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
680 RLIM_INFINITY);
681 ssb->ssb_mbmax = 0;
685 * Some routines that return EOPNOTSUPP for entry points that are not
686 * supported by a protocol. Fill in as needed.
688 void
689 pr_generic_notsupp(netmsg_t msg)
691 lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
695 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
696 struct mbuf *top, struct mbuf *control, int flags,
697 struct thread *td)
699 if (top)
700 m_freem(top);
701 if (control)
702 m_freem(control);
703 return (EOPNOTSUPP);
707 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
708 struct uio *uio, struct sockbuf *sio,
709 struct mbuf **controlp, int *flagsp)
711 return (EOPNOTSUPP);
715 * This isn't really a ``null'' operation, but it's the default one
716 * and doesn't do anything destructive.
718 void
719 pru_sense_null(netmsg_t msg)
721 msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
722 lwkt_replymsg(&msg->lmsg, 0);
726 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. Callers
727 * of this routine assume that it always succeeds, so we have to use a
728 * blockable allocation even though we might be called from a critical thread.
730 struct sockaddr *
731 dup_sockaddr(const struct sockaddr *sa)
733 struct sockaddr *sa2;
735 sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
736 bcopy(sa, sa2, sa->sa_len);
737 return (sa2);
741 * Create an external-format (``xsocket'') structure using the information
742 * in the kernel-format socket structure pointed to by so. This is done
743 * to reduce the spew of irrelevant information over this interface,
744 * to isolate user code from changes in the kernel structure, and
745 * potentially to provide information-hiding if we decide that
746 * some of this information should be hidden from users.
748 void
749 sotoxsocket(struct socket *so, struct xsocket *xso)
751 xso->xso_len = sizeof *xso;
752 xso->xso_so = so;
753 xso->so_type = so->so_type;
754 xso->so_options = so->so_options;
755 xso->so_linger = so->so_linger;
756 xso->so_state = so->so_state;
757 xso->so_pcb = so->so_pcb;
758 xso->xso_protocol = so->so_proto->pr_protocol;
759 xso->xso_family = so->so_proto->pr_domain->dom_family;
760 xso->so_qlen = so->so_qlen;
761 xso->so_incqlen = so->so_incqlen;
762 xso->so_qlimit = so->so_qlimit;
763 xso->so_timeo = so->so_timeo;
764 xso->so_error = so->so_error;
765 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
766 xso->so_oobmark = so->so_oobmark;
767 ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
768 ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
769 xso->so_uid = so->so_cred->cr_uid;
773 * Here is the definition of some of the basic objects in the kern.ipc
774 * branch of the MIB.
776 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
779 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
781 * NOTE! sb_max only applies to user-created socket buffers.
783 static int dummy;
784 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
785 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
786 &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
787 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
788 &maxsockets, 0, "Maximum number of sockets available");
789 SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
790 &sb_efficiency, 0,
791 "Socket buffer limit scaler");
794 * Initialize maxsockets
796 static void
797 init_maxsockets(void *ignored)
799 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
800 maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
802 SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
803 init_maxsockets, NULL);