/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)tcp_usrreq.c	8.2 (Berkeley) 1/3/94
 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.17 2002/10/11 11:46:44 ume Exp $
 */
66 #include "opt_ipsec.h"
68 #include "opt_inet6.h"
69 #include "opt_tcpdebug.h"
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
81 #include <sys/domain.h>
83 #include <sys/socket.h>
84 #include <sys/socketvar.h>
85 #include <sys/socketops.h>
86 #include <sys/protosw.h>
88 #include <sys/thread2.h>
89 #include <sys/msgport2.h>
90 #include <sys/socketvar2.h>
93 #include <net/netisr.h>
94 #include <net/route.h>
96 #include <net/netmsg2.h>
97 #include <net/netisr2.h>
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
102 #include <netinet/ip6.h>
104 #include <netinet/in_pcb.h>
106 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
111 #include <netinet6/ip6_var.h>
112 #include <netinet6/tcp6_var.h>
114 #include <netinet/tcp.h>
115 #include <netinet/tcp_fsm.h>
116 #include <netinet/tcp_seq.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_timer2.h>
119 #include <netinet/tcp_var.h>
120 #include <netinet/tcpip.h>
122 #include <netinet/tcp_debug.h>
126 #include <netinet6/ipsec.h>

/*
 * TCP protocol interface to socket abstraction.
 */
extern	char *tcpstates[];	/* XXX ??? */

static int	tcp_attach (struct socket *, struct pru_attach_info *);
static void	tcp_connect (netmsg_t msg);
static void	tcp6_connect (netmsg_t msg);
static int	tcp6_connect_oncpu(struct tcpcb *tp, int flags,
				struct sockaddr_in6 *sin6,
				struct in6_addr *addr6);
static struct tcpcb *
		tcp_disconnect (struct tcpcb *);
static struct tcpcb *
		tcp_usrclosed (struct tcpcb *);

#ifdef TCPDEBUG
#define	TCPDEBUG0	int ostate = 0
#define	TCPDEBUG1()	ostate = tp ? tp->t_state : 0
#define	TCPDEBUG2(req)	if (tp && (so->so_options & SO_DEBUG)) \
				tcp_trace(TA_USER, ostate, tp, 0, 0, req)
#else
#define	TCPDEBUG0
#define	TCPDEBUG1()
#define	TCPDEBUG2(req)
#endif

/*
 * Some poorly optimized programs that try to use TCP_NOPUSH to improve
 * performance end up leaving a small amount of data sitting in the send
 * buffer.  That data will _not_ be pushed into the network until more
 * data is written to the socket or the socket's write side is shut down.
 */
static int	tcp_disable_nopush = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_nopush, CTLFLAG_RW,
    &tcp_disable_nopush, 0, "TCP_NOPUSH socket option will have no effect");

/*
 * Allocate socket buffer space.
 */
static int
tcp_usr_preattach(struct socket *so, int proto __unused,
    struct pru_attach_info *ai)
{
	int error;

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		error = soreserve(so, tcp_sendspace, tcp_recvspace,
				  ai->sb_rlimit);
		if (error)
			return (error);
	}
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE | SSB_PREALLOC);

	return 0;
}
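
/*
 * Note: soreserve() above seeds the buffers with tcp_sendspace and
 * tcp_recvspace (defined near the end of this file); SSB_AUTOSIZE then
 * allows the kernel to grow the buffers dynamically beyond those defaults.
 */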

/*
 * TCP attaches to the socket via pru_attach(), reserving space and an
 * internet control block.  The socket may move to another CPU later,
 * when we bind/connect.
 */
static void
tcp_usr_attach(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct pru_attach_info *ai = msg->attach.nm_ai;
	int error;
	struct inpcb *inp;
	struct tcpcb *tp = NULL;

	inp = so->so_pcb;
	KASSERT(inp == NULL, ("tcp socket attached"));

	error = tcp_attach(so, ai);

	if ((so->so_options & SO_LINGER) && so->so_linger == 0)
		so->so_linger = TCP_LINGERTIME;

	TCPDEBUG2(PRU_ATTACH);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * pru_detach() detaches the TCP protocol from the socket.
 * If the protocol state is non-embryonic, then can't
 * do this directly: have to initiate a pru_disconnect(),
 * which may finish later; embryonic TCB's can just
 * be discarded here.
 */
static void
tcp_usr_detach(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error = 0;

	inp = so->so_pcb;

	/*
	 * If the inp is already detached or was never attached, it may have
	 * been due to an async close or an async attach failure.  Just
	 * return as if no error occurred.
	 */
	if (inp) {
		tp = intotcpcb(inp);
		KASSERT(tp != NULL, ("tcp_usr_detach: tp is NULL"));

		tp = tcp_disconnect(tp);
		TCPDEBUG2(PRU_DETACH);
	}
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * NOTE: ignore_error is non-zero for certain disconnection races
 * which we want to silently allow, otherwise close() may return
 * an unexpected error.
 *
 * NOTE: The variables (msg) and (tp) are assumed.
 */
#define	COMMON_START(so, inp, ignore_error)			\
		error = ignore_error ? 0 : EINVAL;		\
	tp = intotcpcb(inp);					\

#define	COMMON_END1(req, noreply)				\
	lwkt_replymsg(&msg->lmsg, error);			\

#define	COMMON_END(req)		COMMON_END1((req), 0)
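
/*
 * Summary of how the helpers above are used by the pru_* handlers below
 * (the full macro bodies are longer than the fragments shown):
 * COMMON_START() looks up the inpcb/tcpcb for the socket and bails out
 * early -- with EINVAL, or with 0 when ignore_error is set -- if the
 * inpcb is already gone; COMMON_END()/COMMON_END1() run the TCPDEBUG
 * hook and reply to the netmsg unless noreply is requested.
 */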

static void
tcp_sosetport(struct lwkt_msg *msg, lwkt_port_t port)
{
	sosetport(((struct netmsg_base *)msg)->nm_so, port);
}

/*
 * Give the socket an address.
 */
static void
tcp_usr_bind(netmsg_t msg)
{
	struct socket *so = msg->bind.base.nm_so;
	struct sockaddr *nam = msg->bind.nm_nam;
	struct thread *td = msg->bind.nm_td;
	struct sockaddr_in *sinp;
	lwkt_port_t port0 = netisr_cpuport(0);

	COMMON_START(so, inp, 0);

	/*
	 * Must check for multicast addresses and disallow binding
	 * to them.
	 */
	sinp = (struct sockaddr_in *)nam;
	if (sinp->sin_family == AF_INET &&
	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
		error = EAFNOSUPPORT;
		goto out;
	}

	/*
	 * Check "already bound" here (in_pcbbind() does the same check
	 * though), so we don't forward a connected socket to netisr0,
	 * which would panic in the following in_pcbunlink().
	 */
	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
		error = EINVAL;	/* already bound */
		goto out;
	}

	/*
	 * Use netisr0 to serialize in_pcbbind(), so that pru_detach and
	 * pru_bind for different sockets on the same local port can be
	 * properly ordered.  The original race is illustrated here for
	 * reference:
	 *
	 *	s1 = socket();
	 *	bind(s1, *.PORT);
	 *	close(s1);	<----- asynchronous
	 *	s2 = socket();
	 *	bind(s2, *.PORT);
	 *
	 * All will expect bind(s2, *.PORT) to succeed.  However, it will
	 * fail if the following sequence happens due to random socket
	 * initial msgport and asynchronous close(2):
	 *
	 *		: pru_bind(s2) [*.PORT is used by s1]
	 */
	if (&curthread->td_msgport != port0) {
		lwkt_msg_t lmsg = &msg->bind.base.lmsg;

		KASSERT((msg->bind.nm_flags & PRUB_RELINK) == 0,
		    ("already asked to relink"));

		in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
		msg->bind.nm_flags |= PRUB_RELINK;

		TCP_STATE_MIGRATE_START(tp);

		/* See the related comment in tcp_connect() */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port0, lmsg);
		/* msg invalid now */
		return;
	}
	KASSERT(so->so_port == port0, ("so_port is not netisr0"));

	if (msg->bind.nm_flags & PRUB_RELINK) {
		msg->bind.nm_flags &= ~PRUB_RELINK;
		TCP_STATE_MIGRATE_END(tp);
		in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
	}
	KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));

	error = in_pcbbind(inp, nam, td);

	COMMON_END(PRU_BIND);
}

static void
tcp6_usr_bind(netmsg_t msg)
{
	struct socket *so = msg->bind.base.nm_so;
	struct sockaddr *nam = msg->bind.nm_nam;
	struct thread *td = msg->bind.nm_td;
	struct sockaddr_in6 *sin6p;

	COMMON_START(so, inp, 0);

	/*
	 * Must check for multicast addresses and disallow binding
	 * to them.
	 */
	sin6p = (struct sockaddr_in6 *)nam;
	if (sin6p->sin6_family == AF_INET6 &&
	    IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
		error = EAFNOSUPPORT;
		goto out;
	}
	error = in6_pcbbind(inp, nam, td);

	COMMON_END(PRU_BIND);
}
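
/*
 * The listen paths below install a listening inpcb into the wildcard
 * hash of every CPU: in_pcbinswildcardhash_handler() is forwarded from
 * netisr to netisr, inserting the inpcb into each remote CPU's tcbinfo
 * (the last CPU replies to the message instead of forwarding it), while
 * the current CPU's insertion is done directly via
 * in_pcbinswildcardhash().
 */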
struct netmsg_inswildcard {
	struct netmsg_base	base;
	struct inpcb		*nm_inp;
};

static void
in_pcbinswildcardhash_handler(netmsg_t msg)
{
	struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
	int cpu = mycpuid, nextcpu;

	in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}

/*
 * Prepare to accept connections.
 */
static void
tcp_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	struct netmsg_inswildcard nm;
	lwkt_port_t port0 = netisr_cpuport(0);

	COMMON_START(so, inp, 0);

	if (&curthread->td_msgport != port0) {
		lwkt_msg_t lmsg = &msg->listen.base.lmsg;

		KASSERT((msg->listen.nm_flags & PRUL_RELINK) == 0,
		    ("already asked to relink"));

		in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
		msg->listen.nm_flags |= PRUL_RELINK;

		TCP_STATE_MIGRATE_START(tp);

		/* See the related comment in tcp_connect() */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port0, lmsg);
		/* msg invalid now */
		return;
	}
	KASSERT(so->so_port == port0, ("so_port is not netisr0"));

	if (msg->listen.nm_flags & PRUL_RELINK) {
		msg->listen.nm_flags &= ~PRUL_RELINK;
		TCP_STATE_MIGRATE_END(tp);
		in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
	}
	KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));

	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	TCP_STATE_CHANGE(tp, TCPS_LISTEN);
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	/*
	 * Create tcpcb per-cpu port cache
	 *
	 * This _must_ be done before installing this inpcb into
	 * the wildcard hash.
	 */
	tcp_pcbport_create(tp);

	/*
	 * Put this inpcb into wildcard hash on other cpus.
	 */
	ASSERT_INP_NOTINHASH(inp);
	netmsg_init(&nm.base, NULL, &curthread->td_msgport,
	    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
	nm.nm_inp = inp;
	lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);

	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}

static void
tcp6_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	struct netmsg_inswildcard nm;

	COMMON_START(so, inp, 0);

	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	TCP_STATE_CHANGE(tp, TCPS_LISTEN);
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	/*
	 * Create tcpcb per-cpu port cache
	 *
	 * This _must_ be done before installing this inpcb into
	 * the wildcard hash.
	 */
	tcp_pcbport_create(tp);

	/*
	 * Put this inpcb into wildcard hash on other cpus.
	 */
	KKASSERT(so->so_port == netisr_cpuport(0));
	KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
	ASSERT_INP_NOTINHASH(inp);

	netmsg_init(&nm.base, NULL, &curthread->td_msgport,
	    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
	nm.nm_inp = inp;
	lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);

	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}

/*
 * Initiate connection to peer.
 * Create a template for use in transmissions on this connection.
 * Enter SYN_SENT state, and mark socket as connecting.
 * Start keep-alive timer, and seed output sequence space.
 * Send initial segment on connection.
 */
static void
tcp_usr_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	struct sockaddr_in *sinp;

	COMMON_START(so, inp, 0);

	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sinp = (struct sockaddr_in *)nam;
	if (sinp->sin_family == AF_INET &&
	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if (!prison_remote_ip(td, (struct sockaddr *)sinp)) {
		error = EAFNOSUPPORT; /* IPv6 only jail */
		goto out;
	}

	tcp_connect(msg);
	/* msg is invalid now */
	return;
out:
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	if (msg->connect.nm_flags & PRUC_HELDTD)
		lwkt_rele(td);
	if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
		so->so_error = error;
		soisdisconnected(so);
	}
	lwkt_replymsg(&msg->lmsg, error);
}

static void
tcp6_usr_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	struct sockaddr_in6 *sin6p;

	COMMON_START(so, inp, 0);

	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sin6p = (struct sockaddr_in6 *)nam;
	if (sin6p->sin6_family == AF_INET6 &&
	    IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if (!prison_remote_ip(td, nam)) {
		error = EAFNOSUPPORT; /* IPv4 only jail */
		goto out;
	}

	/* Reject v4-mapped address */
	if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
		error = EADDRNOTAVAIL;
		goto out;
	}

	inp->inp_inc.inc_isipv6 = 1;

	tcp6_connect(msg);
	/* msg is invalid now */
	return;
out:
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * Initiate disconnect from peer.
 * If connection never passed embryonic stage, just drop;
 * else if we don't need to let data drain, then we can just drop anyway,
 * else we have to begin the TCP shutdown process: mark socket disconnecting,
 * drain unread data, state switch to reflect user close, and
 * send segment (e.g. FIN) to peer.  Socket will be really disconnected
 * when peer sends FIN and acks ours.
 *
 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
 */
static void
tcp_usr_disconnect(netmsg_t msg)
{
	struct socket *so = msg->disconnect.base.nm_so;

	COMMON_START(so, inp, 1);
	tp = tcp_disconnect(tp);
	COMMON_END(PRU_DISCONNECT);
}

/*
 * Accept a connection.  Essentially all the work is
 * done at higher levels; just return the address
 * of the peer, storing through addr.
 */
static void
tcp_usr_accept(netmsg_t msg)
{
	struct socket *so = msg->accept.base.nm_so;
	struct sockaddr **nam = msg->accept.nm_nam;
	struct tcpcb *tp = NULL;

	if (so->so_state & SS_ISDISCONNECTED) {
		error = ECONNABORTED;
		goto out;
	}

	in_setpeeraddr(so, nam);
	COMMON_END(PRU_ACCEPT);
}

static void
tcp6_usr_accept(netmsg_t msg)
{
	struct socket *so = msg->accept.base.nm_so;
	struct sockaddr **nam = msg->accept.nm_nam;
	struct tcpcb *tp = NULL;

	if (so->so_state & SS_ISDISCONNECTED) {
		error = ECONNABORTED;
		goto out;
	}

	in6_setpeeraddr(so, nam);
	COMMON_END(PRU_ACCEPT);
}

/*
 * Mark the connection as being incapable of further output.
 */
static void
tcp_usr_shutdown(netmsg_t msg)
{
	struct socket *so = msg->shutdown.base.nm_so;

	COMMON_START(so, inp, 0);
	socantsendmore(so);
	tp = tcp_usrclosed(tp);
	if (tp)
		error = tcp_output(tp);
	COMMON_END(PRU_SHUTDOWN);
}

/*
 * After a receive, possibly send window update to peer.
 */
static void
tcp_usr_rcvd(netmsg_t msg)
{
	struct socket *so = msg->rcvd.base.nm_so;
	int error = 0, noreply = 0;

	COMMON_START(so, inp, 0);

	if (msg->rcvd.nm_pru_flags & PRUR_ASYNC) {
		noreply = 1;
		so_async_rcvd_reply(so);
	}
	tcp_output(tp);

	COMMON_END1(PRU_RCVD, noreply);
}

/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 */
static void
tcp_usr_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	int flags = msg->send.nm_flags;
	struct mbuf *m = msg->send.nm_m;

	KKASSERT(msg->send.nm_control == NULL);
	KKASSERT(msg->send.nm_addr == NULL);
	KKASSERT((flags & PRUS_FREEADDR) == 0);

	inp = so->so_pcb;
	if (inp == NULL) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, e.g. while doing uiomove or a
		 * network interrupt in the non-critical section of sosend().
		 */
		m_freem(m);
		error = ECONNRESET;	/* XXX EPIPE? */
		goto out;
	}
	tp = intotcpcb(inp);

	/*
	 * This is no longer necessary, since:
	 * - sosendtcp() has already checked it for us
	 * - It does not work with asynchronized send
	 */

	/*
	 * Don't let too much OOB data build up
	 */
	if (flags & PRUS_OOB) {
		if (ssb_space(&so->so_snd) < -512) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
	}

	/*
	 * Pump the data into the socket.
	 */
	ssb_appendstream(&so->so_snd, m);

	if (flags & PRUS_OOB) {
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
		tp->t_flags |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flags &= ~TF_FORCE;
	} else {
		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		if (tp != NULL && !tcp_output_pending(tp)) {
			if (flags & PRUS_MORETOCOME)
				tp->t_flags |= TF_MORETOCOME;
			error = tcp_output_fair(tp);
			if (flags & PRUS_MORETOCOME)
				tp->t_flags &= ~TF_MORETOCOME;
		}
	}

	COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
		    ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
		    (flags & PRUS_NOREPLY));
}

/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 * will sofree() it when we return.
 */
static void
tcp_usr_abort(netmsg_t msg)
{
	struct socket *so = msg->abort.base.nm_so;

	COMMON_START(so, inp, 1);
	tp = tcp_drop(tp, ECONNABORTED);
	COMMON_END(PRU_ABORT);
}

/*
 * Receive out-of-band data.
 */
static void
tcp_usr_rcvoob(netmsg_t msg)
{
	struct socket *so = msg->rcvoob.base.nm_so;
	struct mbuf *m = msg->rcvoob.nm_m;
	int flags = msg->rcvoob.nm_flags;

	COMMON_START(so, inp, 0);
	if ((so->so_oobmark == 0 &&
	     (so->so_state & SS_RCVATMARK) == 0) ||
	    so->so_options & SO_OOBINLINE ||
	    tp->t_oobflags & TCPOOB_HADDATA) {
		error = EINVAL;
		goto out;
	}
	if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
		error = EWOULDBLOCK;
		goto out;
	}
	m->m_len = 1;
	*mtod(m, caddr_t) = tp->t_iobc;
	if ((flags & MSG_PEEK) == 0)
		tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
	COMMON_END(PRU_RCVOOB);
}

static void
tcp_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
{
	in_savefaddr(so, faddr);
}

static void
tcp6_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
{
	in6_savefaddr(so, faddr);
}

static int
tcp_usr_preconnect(struct socket *so, const struct sockaddr *nam,
    struct thread *td __unused)
{
	const struct sockaddr_in *sinp;

	sinp = (const struct sockaddr_in *)nam;
	if (sinp->sin_family == AF_INET &&
	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
		return (EAFNOSUPPORT);

	soisconnecting(so);
	return (0);
}

/* xxx - should be const */
struct pr_usrreqs tcp_usrreqs = {
	.pru_abort = tcp_usr_abort,
	.pru_accept = tcp_usr_accept,
	.pru_attach = tcp_usr_attach,
	.pru_bind = tcp_usr_bind,
	.pru_connect = tcp_usr_connect,
	.pru_connect2 = pr_generic_notsupp,
	.pru_control = in_control_dispatch,
	.pru_detach = tcp_usr_detach,
	.pru_disconnect = tcp_usr_disconnect,
	.pru_listen = tcp_usr_listen,
	.pru_peeraddr = in_setpeeraddr_dispatch,
	.pru_rcvd = tcp_usr_rcvd,
	.pru_rcvoob = tcp_usr_rcvoob,
	.pru_send = tcp_usr_send,
	.pru_sense = pru_sense_null,
	.pru_shutdown = tcp_usr_shutdown,
	.pru_sockaddr = in_setsockaddr_dispatch,
	.pru_sosend = sosendtcp,
	.pru_soreceive = sorecvtcp,
	.pru_savefaddr = tcp_usr_savefaddr,
	.pru_preconnect = tcp_usr_preconnect,
	.pru_preattach = tcp_usr_preattach
};
= {
1001 .pru_abort
= tcp_usr_abort
,
1002 .pru_accept
= tcp6_usr_accept
,
1003 .pru_attach
= tcp_usr_attach
,
1004 .pru_bind
= tcp6_usr_bind
,
1005 .pru_connect
= tcp6_usr_connect
,
1006 .pru_connect2
= pr_generic_notsupp
,
1007 .pru_control
= in6_control_dispatch
,
1008 .pru_detach
= tcp_usr_detach
,
1009 .pru_disconnect
= tcp_usr_disconnect
,
1010 .pru_listen
= tcp6_usr_listen
,
1011 .pru_peeraddr
= in6_setpeeraddr_dispatch
,
1012 .pru_rcvd
= tcp_usr_rcvd
,
1013 .pru_rcvoob
= tcp_usr_rcvoob
,
1014 .pru_send
= tcp_usr_send
,
1015 .pru_sense
= pru_sense_null
,
1016 .pru_shutdown
= tcp_usr_shutdown
,
1017 .pru_sockaddr
= in6_setsockaddr_dispatch
,
1018 .pru_sosend
= sosendtcp
,
1019 .pru_soreceive
= sorecvtcp
,
1020 .pru_savefaddr
= tcp6_usr_savefaddr

static int
tcp_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf *m,
		  const struct sockaddr_in *sin, struct sockaddr_in *if_sin,
		  uint32_t hash)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct route *ro = &inp->inp_route;

	KASSERT(inp->inp_pcbinfo == &tcbinfo[mycpu->gd_cpuid],
	    ("pcbinfo mismatch"));

	oinp = in_pcblookup_hash(inp->inp_pcbinfo,
				 sin->sin_addr, sin->sin_port,
				 (inp->inp_laddr.s_addr != INADDR_ANY ?
				  inp->inp_laddr : if_sin->sin_addr),
				 inp->inp_lport, 0, NULL);
	if (oinp)
		return (EADDRINUSE);

	if (inp->inp_laddr.s_addr == INADDR_ANY)
		inp->inp_laddr = if_sin->sin_addr;
	KASSERT(inp->inp_faddr.s_addr == sin->sin_addr.s_addr,
	    ("faddr mismatch for reconnect"));
	KASSERT(inp->inp_fport == sin->sin_port,
	    ("fport mismatch for reconnect"));
	in_pcbinsconnhash(inp);

	inp->inp_flags |= INP_HASH;
	inp->inp_hashval = hash;

	/*
	 * We are now on the inpcb's owner CPU.  If the cached route was
	 * freed because the rtentry's owner CPU is not the current CPU
	 * (e.g. in tcp_connect()), then we try to reallocate it here with
	 * the hope that a rtentry may be cloned from a RTF_PRCLONING
	 * rtentry.
	 */
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) && /*XXX*/
	    ro->ro_rt == NULL) {
		bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
		((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
		    sin->sin_addr;
		rtalloc(ro);
	}

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU now.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/*
	 * Compute window scaling to request.  Use a larger scaling than
	 * needed for the initial receive buffer in case the receive buffer
	 * gets expanded.
	 */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	    (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}
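
	/*
	 * Example of the arithmetic above (illustration only): with
	 * TCP_MAXWIN == 65535, a 1 MB receive buffer needs
	 * 65535 << scale >= 1048576, i.e. request_r_scale becomes 5
	 * (assuming TCP_MIN_WINSHIFT <= 5), while the default 57344-byte
	 * buffer already fits in an unscaled window.
	 */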

	tcpstat.tcps_connattempt++;
	TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	ssb_appendstream(&so->so_snd, m);

	if (flags & PRUS_OOB)
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}

/*
 * Common subroutine to open a TCP connection to remote host specified
 * by struct sockaddr_in in mbuf *nam.  Call in_pcbbind to assign a local
 * port number if needed.  Call in_pcbladdr to do the routing and to choose
 * a local host address (interface).
 * Initialize connection parameters and enter SYN-SENT state.
 */
static void
tcp_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	struct sockaddr_in *sin = (struct sockaddr_in *)nam;
	struct sockaddr_in *if_sin = NULL;

	COMMON_START(so, inp, 0);

	/*
	 * Reconnect our pcb if we have to
	 */
	if (msg->connect.nm_flags & PRUC_RECONNECT) {
		msg->connect.nm_flags &= ~PRUC_RECONNECT;
		TCP_STATE_MIGRATE_END(tp);
		in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
	}

	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		kprintf("inpcb %p, double-connect race\n", inp);
		error = EISCONN;
		if (so->so_state & SS_ISCONNECTING)
			error = EALREADY;
		goto out;
	}
	KASSERT(inp->inp_fport == 0, ("invalid fport"));

	/*
	 * Select local port, if it is not yet selected.
	 */
	if (inp->inp_lport == 0) {
		KKASSERT(inp->inp_laddr.s_addr == INADDR_ANY);

		error = in_pcbladdr(inp, nam, &if_sin, td);
		if (error)
			goto out;
		inp->inp_laddr.s_addr = if_sin->sin_addr.s_addr;
		msg->connect.nm_flags |= PRUC_HASLADDR;

		/*
		 * Install faddr/fport earlier, so that when this
		 * inpcb is installed on to the lport hash, the
		 * 4-tuple contains the correct value.
		 *
		 * NOTE: The faddr/fport have to be installed
		 * after the in_pcbladdr(), which may change them.
		 */
		inp->inp_faddr = sin->sin_addr;
		inp->inp_fport = sin->sin_port;

		error = in_pcbbind_remote(inp, nam, td);
		if (error)
			goto out;
	}

	if ((msg->connect.nm_flags & PRUC_HASLADDR) == 0) {
		/*
		 * This inpcb was bound before this connect.
		 */
		error = in_pcbladdr(inp, nam, &if_sin, td);
		if (error)
			goto out;

		/*
		 * Save or refresh the faddr/fport, since they may
		 * be changed by in_pcbladdr().
		 */
		inp->inp_faddr = sin->sin_addr;
		inp->inp_fport = sin->sin_port;
	}
	KASSERT(inp->inp_faddr.s_addr == sin->sin_addr.s_addr,
	    ("faddr mismatch for reconnect"));
	KASSERT(inp->inp_fport == sin->sin_port,
	    ("fport mismatch for reconnect"));

	KKASSERT(inp->inp_socket == so);

	hash = tcp_addrhash(sin->sin_addr.s_addr, sin->sin_port,
			    (inp->inp_laddr.s_addr != INADDR_ANY ?
			     inp->inp_laddr.s_addr : if_sin->sin_addr.s_addr),
			    inp->inp_lport);
	port = netisr_hashport(hash);

	if (port != &curthread->td_msgport) {
		lwkt_msg_t lmsg = &msg->connect.base.lmsg;

		/*
		 * in_pcbladdr() may have allocated a route entry for us
		 * on the current CPU, but we need a route entry on the
		 * inpcb's owner CPU, so free it here.
		 */
		in_pcbresetroute(inp);

		/*
		 * We are moving the socket to another protocol processing
		 * port; we have to unlink here and re-link on the
		 * target CPU.
		 */
		in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
		msg->connect.nm_flags |= PRUC_RECONNECT;
		msg->connect.base.nm_dispatch = tcp_connect;

		TCP_STATE_MIGRATE_START(tp);

		/*
		 * Use the message put-done receipt to change this socket's
		 * so_port, i.e. _after_ this message was put onto the
		 * target netisr's msgport but _before_ the message could
		 * be pulled from the target netisr's msgport, so that:
		 * - The upper half (socket code) will not see the new
		 *   msgport before this message reaches the new msgport
		 *   and messages for this socket will be ordered.
		 * - This message will see the new msgport, when its
		 *   handler is called in the target netisr.
		 *
		 * We MUST use the message put-done receipt to change this
		 * socket's so_port.
		 *
		 * If we changed the so_port in this netisr after the
		 * lwkt_forwardmsg (so messages for this socket will be
		 * ordered) and changed the so_port in the target netisr
		 * at the very beginning of this message's handler, we
		 * would suffer an so_port overwrite race, given this
		 * message might be forwarded again.
		 *
		 * This mechanism depends on the netisr's msgport being a
		 * spin msgport (currently it is :).
		 *
		 * If the upper half saw the new msgport before this
		 * message reached the target netisr's msgport, the
		 * messages sent from the upper half could reach the new
		 * msgport before this message, thus there would be
		 * message reordering.  The worst case would be soclose()
		 * seeing the new msgport and the detach message reaching
		 * the new msgport before this message, i.e. the inpcb
		 * could have been destroyed when this message was still
		 * pending on or on its way to the new msgport.  Other
		 * weird cases could also happen, e.g. inpcb->inp_pcbinfo,
		 * since we have unlinked this inpcb from the current
		 * CPU's pcbinfo.
		 */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port, lmsg);
		/* msg invalid now */
		return;
	} else if (msg->connect.nm_flags & PRUC_HELDTD) {
		/*
		 * The original thread is no longer needed; release it.
		 */
		lwkt_rele(td);
		msg->connect.nm_flags &= ~PRUC_HELDTD;
	}
	error = tcp_connect_oncpu(tp, msg->connect.nm_sndflags,
				  msg->connect.nm_m, sin, if_sin, hash);
	msg->connect.nm_m = NULL;
out:
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	if (msg->connect.nm_flags & PRUC_HELDTD)
		lwkt_rele(td);
	if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
		so->so_error = error;
		soisdisconnected(so);
	}
	lwkt_replymsg(&msg->connect.base.lmsg, error);
	/* msg invalid now */
}

static void
tcp6_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
	struct in6_addr *addr6;

	COMMON_START(so, inp, 0);

	/*
	 * Reconnect our pcb if we have to
	 */
	if (msg->connect.nm_flags & PRUC_RECONNECT) {
		msg->connect.nm_flags &= ~PRUC_RECONNECT;
		TCP_STATE_MIGRATE_END(tp);
		in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
	}

	/*
	 * Bind if we have to
	 */
	if (inp->inp_lport == 0) {
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	error = in6_pcbladdr(inp, nam, &addr6, td);
	if (error)
		goto out;

	port = tcp6_addrport();	/* XXX hack for now, always cpu0 */

	if (port != &curthread->td_msgport) {
		lwkt_msg_t lmsg = &msg->connect.base.lmsg;

		/*
		 * in_pcbladdr() may have allocated a route entry for us
		 * on the current CPU, but we need a route entry on the
		 * inpcb's owner CPU, so free it here.
		 */
		in_pcbresetroute(inp);

		in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
		msg->connect.nm_flags |= PRUC_RECONNECT;
		msg->connect.base.nm_dispatch = tcp6_connect;

		TCP_STATE_MIGRATE_START(tp);

		/* See the related comment in tcp_connect() */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port, lmsg);
		/* msg invalid now */
		return;
	}
	error = tcp6_connect_oncpu(tp, msg->connect.nm_sndflags,
				   &msg->connect.nm_m, sin6, addr6);
	/* nm_m may still be intact */
out:
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	lwkt_replymsg(&msg->connect.base.lmsg, error);
	/* msg invalid now */
}

static int
tcp6_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf **mp,
		   struct sockaddr_in6 *sin6, struct in6_addr *addr6)
{
	struct mbuf *m = *mp;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct inpcb *oinp;

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
				  &sin6->sin6_addr, sin6->sin6_port,
				  (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
				   addr6 : &inp->in6p_laddr),
				  inp->inp_lport, 0, NULL);
	if (oinp)
		return (EADDRINUSE);

	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		inp->in6p_laddr = *addr6;
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
		inp->in6p_flowinfo = sin6->sin6_flowinfo;
	in_pcbinsconnhash(inp);

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU now.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/* Compute window scaling to request. */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	    (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}

	tcpstat.tcps_connattempt++;
	TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
	tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	ssb_appendstream(&so->so_snd, m);
	*mp = NULL;

	if (flags & PRUS_OOB)
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}

/*
 * The new sockopt interface makes it possible for us to block in the
 * copyin/out step (if we take a page fault).  Taking a page fault while
 * in a critical section is probably a Bad Thing.  (Since sockets and pcbs
 * both now use TSM, there probably isn't any need for this function to
 * run in a critical section any more.  This needs more examination.)
 */
void
tcp_ctloutput(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct sockopt *sopt = msg->ctloutput.nm_sopt;
	struct thread *td = NULL;
	int error, opt, optval, opthz;

	if (msg->ctloutput.nm_flags & PRCO_HELDTD)
		td = sopt->sopt_td;

	inp = so->so_pcb;
	tp = intotcpcb(inp);

	/* Get socket's owner cpuid hint */
	if (sopt->sopt_level == SOL_SOCKET &&
	    sopt->sopt_dir == SOPT_GET &&
	    sopt->sopt_name == SO_CPUHINT) {
		if (tp->t_flags & TF_LISTEN) {
			/*
			 * A listen socket's owner cpuid is always 0,
			 * which does not make sense if SO_REUSEPORT
			 * is not used.
			 */
			if (so->so_options & SO_REUSEPORT)
				optval = (inp->inp_lgrpindex & ncpus2_mask);
			else
				optval = -1; /* no hint */
		} else {
			optval = mycpuid;
		}
		soopt_from_kbuf(sopt, &optval, sizeof(optval));
		lwkt_replymsg(&msg->lmsg, 0);
		return;
	}

	if (sopt->sopt_level != IPPROTO_TCP) {
		if (sopt->sopt_level == IPPROTO_IP) {
			switch (sopt->sopt_name) {
			case IP_MULTICAST_IF:
			case IP_MULTICAST_VIF:
			case IP_MULTICAST_TTL:
			case IP_MULTICAST_LOOP:
			case IP_ADD_MEMBERSHIP:
			case IP_DROP_MEMBERSHIP:
				/*
				 * Multicast does not make sense on
				 * a TCP socket.
				 */
				lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
				return;
			}
		}
		if (INP_CHECK_SOCKAF(so, AF_INET6))
			ip6_ctloutput_dispatch(msg);
		else
			ip_ctloutput_dispatch(msg);
		/* msg invalid now */
		return;
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		error = soopt_to_kbuf(sopt, &optval, sizeof optval,
				      sizeof optval);
		if (error)
			break;
		switch (sopt->sopt_name) {
		case TCP_FASTKEEP:
			if (optval > 0)
				tp->t_keepidle = tp->t_keepintvl;
			else
				tp->t_keepidle = tcp_keepidle;
			tcp_timer_keep_activity(tp, 0);
			break;

#ifdef TCP_SIGNATURE
		case TCP_SIGNATURE_ENABLE:
			if (tp->t_state == TCPS_CLOSED) {
				/*
				 * This is the only safe state in which this
				 * option can be changed.  Some segments
				 * could already have been sent in other
				 * states.
				 */
				if (optval)
					tp->t_flags |= TF_SIGNATURE;
				else
					tp->t_flags &= ~TF_SIGNATURE;
			}
			break;
#endif /* TCP_SIGNATURE */

		case TCP_NODELAY:
		case TCP_NOOPT:
			switch (sopt->sopt_name) {
			case TCP_NODELAY:
				opt = TF_NODELAY;
				break;
			case TCP_NOOPT:
				opt = TF_NOOPT;
				break;
			default:
				opt = 0; /* dead code to fool gcc */
				break;
			}

			if (optval)
				tp->t_flags |= opt;
			else
				tp->t_flags &= ~opt;
			break;

		case TCP_NOPUSH:
			if (tcp_disable_nopush)
				break;
			if (optval)
				tp->t_flags |= TF_NOPUSH;
			else {
				tp->t_flags &= ~TF_NOPUSH;
				error = tcp_output(tp);
			}
			break;

		case TCP_MAXSEG:
			/*
			 * Must be between 0 and maxseg.  If the requested
			 * maxseg is too small to satisfy the desired minmss,
			 * pump it up (silently so sysctl modifications of
			 * minmss do not create unexpected program failures).
			 * Handle degenerate cases.
			 */
			if (optval > 0 && optval <= tp->t_maxseg) {
				if (optval + 40 < tcp_minmss) {
					optval = tcp_minmss - 40;
				}
				tp->t_maxseg = optval;
			} else {
				error = EINVAL;
			}
			break;

		case TCP_KEEPINIT:
			opthz = ((int64_t)optval * hz) / 1000;
			if (opthz >= 1)
				tp->t_keepinit = opthz;
			else
				error = EINVAL;
			break;

		case TCP_KEEPIDLE:
			opthz = ((int64_t)optval * hz) / 1000;
			if (opthz >= 1) {
				tp->t_keepidle = opthz;
				tcp_timer_keep_activity(tp, 0);
			} else {
				error = EINVAL;
			}
			break;

		case TCP_KEEPINTVL:
			opthz = ((int64_t)optval * hz) / 1000;
			if (opthz >= 1) {
				tp->t_keepintvl = opthz;
				tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
			} else {
				error = EINVAL;
			}
			break;

		case TCP_KEEPCNT:
			if (optval > 0) {
				tp->t_keepcnt = optval;
				tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
			} else {
				error = EINVAL;
			}
			break;
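
		/*
		 * The TCP_KEEPINIT/KEEPIDLE/KEEPINTVL options above are
		 * specified in milliseconds and converted to hz ticks as
		 * opthz = (optval * hz) / 1000; e.g. a TCP_KEEPIDLE of
		 * 7200000 ms with hz == 100 stores 720000 ticks in
		 * t_keepidle.
		 */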

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;

	case SOPT_GET:
		switch (sopt->sopt_name) {
#ifdef TCP_SIGNATURE
		case TCP_SIGNATURE_ENABLE:
			optval = (tp->t_flags & TF_SIGNATURE) ? 1 : 0;
			break;
#endif /* TCP_SIGNATURE */
		case TCP_NODELAY:
			optval = tp->t_flags & TF_NODELAY;
			break;
		case TCP_MAXSEG:
			optval = tp->t_maxseg;
			break;
		case TCP_NOOPT:
			optval = tp->t_flags & TF_NOOPT;
			break;
		case TCP_NOPUSH:
			optval = tp->t_flags & TF_NOPUSH;
			break;
		case TCP_KEEPINIT:
			optval = ((int64_t)tp->t_keepinit * 1000) / hz;
			break;
		case TCP_KEEPIDLE:
			optval = ((int64_t)tp->t_keepidle * 1000) / hz;
			break;
		case TCP_KEEPINTVL:
			optval = ((int64_t)tp->t_keepintvl * 1000) / hz;
			break;
		case TCP_KEEPCNT:
			optval = tp->t_keepcnt;
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}

		soopt_from_kbuf(sopt, &optval, sizeof optval);
		break;
	}

	lwkt_replymsg(&msg->lmsg, error);
}

struct netmsg_tcp_ctloutput {
	struct netmsg_pr_ctloutput ctloutput;
	struct sockopt		sopt;
	int			sopt_val;
};

/*
 * Allocate netmsg_pr_ctloutput for asynchronous tcp_ctloutput.
 */
struct netmsg_pr_ctloutput *
tcp_ctloutmsg(struct sockopt *sopt)
{
	struct netmsg_tcp_ctloutput *msg;
	int flags = 0, error;

	KASSERT(sopt->sopt_dir == SOPT_SET, ("not from ctloutput"));

	/* Only a small set of options allows asynchronous setting. */
	if (sopt->sopt_level != IPPROTO_TCP)
		return NULL;
	switch (sopt->sopt_name) {
	case TCP_NODELAY:
	case TCP_NOOPT:
	case TCP_NOPUSH:
	case TCP_FASTKEEP:
		break;
	default:
		return NULL;
	}

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/* Fallback to synchronous tcp_ctloutput */
		return NULL;
	}

	/* Save the sockopt */
	msg->sopt = *sopt;

	/* Fixup the sopt.sopt_val ptr */
	error = sooptcopyin(sopt, &msg->sopt_val,
	    sizeof(msg->sopt_val), sizeof(msg->sopt_val));
	if (error) {
		kfree(msg, M_LWKTMSG);
		return NULL;
	}
	msg->sopt.sopt_val = &msg->sopt_val;

	/* Hold the current thread */
	if (msg->sopt.sopt_td != NULL) {
		flags |= PRCO_HELDTD;
		lwkt_hold(msg->sopt.sopt_td);
	}

	msg->ctloutput.nm_flags = flags;
	msg->ctloutput.nm_sopt = &msg->sopt;

	return &msg->ctloutput;
}
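
/*
 * Note: a message returned by tcp_ctloutmsg() lets the caller run
 * tcp_ctloutput() asynchronously instead of blocking in setsockopt(2);
 * a NULL return -- unsupported option, or kmalloc() failure with
 * M_NULLOK -- means the caller falls back to the synchronous
 * tcp_ctloutput() path, as the comments above indicate.
 */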

/*
 * tcp_sendspace and tcp_recvspace are the default send and receive window
 * sizes, respectively.  These are obsolescent (this information should
 * be set by the route).
 *
 * Use a default that does not require tcp window scaling to be turned
 * on.  Individual programs or the administrator can increase the default.
 */
u_long	tcp_sendspace = 57344;	/* largest multiple of PAGE_SIZE < 64k */
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
    &tcp_sendspace, 0, "Maximum outgoing TCP datagram size");
u_long	tcp_recvspace = 57344;	/* largest multiple of PAGE_SIZE < 64k */
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &tcp_recvspace, 0, "Maximum incoming TCP datagram size");

/*
 * Attach TCP protocol to socket, allocating internet protocol control
 * block, tcp control block, buffer space, and entering CLOSED state.
 */
static int
tcp_attach(struct socket *so, struct pru_attach_info *ai)
{
	boolean_t isipv6 = INP_CHECK_SOCKAF(so, AF_INET6);

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		error = tcp_usr_preattach(so, 0 /* don't care */, ai);
		if (error)
			return (error);
	} else {
		/* Post attach; do nothing */
	}

	cpu = mycpu->gd_cpuid;

	/*
	 * Set the default pcbinfo.  This will likely change when we
	 * bind/connect.
	 */
	error = in_pcballoc(so, &tcbinfo[cpu]);
	if (error)
		return (error);
	inp = so->so_pcb;
	if (isipv6)
		inp->in6p_hops = -1;	/* use kernel default */

	/* Keep a reference for asynchronized pru_rcvd */

/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static struct tcpcb *
tcp_disconnect(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (tp->t_state < TCPS_ESTABLISHED) {
		tp = tcp_close(tp);
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		tp = tcp_drop(tp, 0);
	} else {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		soisdisconnecting(so);
		sbflush(&so->so_rcv.sb);
		tp = tcp_usrclosed(tp);
		if (tp)
			tcp_output(tp);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	return (tp);
}

/*
 * User issued close, and wish to trail through shutdown states:
 * if never received SYN, just forget it.  If got a SYN from peer,
 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
 * If already got a FIN from peer, then almost done; go to LAST_ACK
 * state.  In all other cases, have already sent FIN to peer (e.g.
 * after PRU_SHUTDOWN), and just have to play tedious game waiting
 * for peer to send FIN or not respond to keep-alives, etc.
 * We can let the user exit from the close as soon as the FIN is acked.
 */
static struct tcpcb *
tcp_usrclosed(struct tcpcb *tp)
{
	switch (tp->t_state) {
	case TCPS_CLOSED:
	case TCPS_LISTEN:
	case TCPS_SYN_SENT:
		TCP_STATE_CHANGE(tp, TCPS_CLOSED);
		tp = tcp_close(tp);
		break;

	case TCPS_SYN_RECEIVED:
		tp->t_flags |= TF_NEEDFIN;
		break;

	case TCPS_ESTABLISHED:
		TCP_STATE_CHANGE(tp, TCPS_FIN_WAIT_1);
		break;

	case TCPS_CLOSE_WAIT:
		TCP_STATE_CHANGE(tp, TCPS_LAST_ACK);
		break;
	}
	if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
		soisdisconnected(tp->t_inpcb->inp_socket);
		/* To prevent the connection hanging in FIN_WAIT_2 forever. */
		if (tp->t_state == TCPS_FIN_WAIT_2) {
			tcp_callout_reset(tp, tp->tt_2msl, tp->t_maxidle,
			    tcp_timer_2msl);
		}
	}
	return (tp);
}