2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1988, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94
63 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.17 2002/10/11 11:46:44 ume Exp $
67 #include "opt_inet6.h"
68 #include "opt_tcpdebug.h"
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/malloc.h>
74 #include <sys/sysctl.h>
75 #include <sys/globaldata.h>
76 #include <sys/thread.h>
80 #include <sys/domain.h>
82 #include <sys/socket.h>
83 #include <sys/socketvar.h>
84 #include <sys/socketops.h>
85 #include <sys/protosw.h>
88 #include <sys/msgport2.h>
89 #include <sys/socketvar2.h>
92 #include <net/netisr.h>
93 #include <net/route.h>
95 #include <net/netmsg2.h>
96 #include <net/netisr2.h>
98 #include <netinet/in.h>
99 #include <netinet/in_systm.h>
101 #include <netinet/ip6.h>
103 #include <netinet/in_pcb.h>
105 #include <netinet6/in6_pcb.h>
107 #include <netinet/in_var.h>
108 #include <netinet/ip_var.h>
110 #include <netinet6/ip6_var.h>
111 #include <netinet6/tcp6_var.h>
113 #include <netinet/tcp.h>
114 #include <netinet/tcp_fsm.h>
115 #include <netinet/tcp_seq.h>
116 #include <netinet/tcp_timer.h>
117 #include <netinet/tcp_timer2.h>
118 #include <netinet/tcp_var.h>
119 #include <netinet/tcpip.h>
121 #include <netinet/tcp_debug.h>
123 #include <machine/limits.h>
/*
 * Limits for the TCP_KEEP* socket options (we adopt the same limits that
 * Linux uses for these options).
 *
 * MAXKEEPALIVE bounds the keep-alive time values; MAXKEEPCNT bounds the
 * keep-alive probe count.  NOTE(review): units for MAXKEEPALIVE are
 * presumably seconds, matching Linux — confirm against the TCP_KEEP*
 * option handling in the setsockopt path.
 */
#define	MAXKEEPALIVE	32767
#define	MAXKEEPCNT	127
133 * TCP protocol interface to socket abstraction.
135 extern char *tcpstates
[]; /* XXX ??? */
137 static int tcp_attach (struct socket
*, struct pru_attach_info
*);
138 static void tcp_connect (netmsg_t msg
);
140 static void tcp6_connect (netmsg_t msg
);
141 static int tcp6_connect_oncpu(struct tcpcb
*tp
, int flags
,
143 struct sockaddr_in6
*sin6
,
144 struct in6_addr
*addr6
);
146 static struct tcpcb
*
147 tcp_disconnect (struct tcpcb
*);
148 static struct tcpcb
*
149 tcp_usrclosed (struct tcpcb
*);
152 #define TCPDEBUG0 int ostate = 0
153 #define TCPDEBUG1() ostate = tp ? tp->t_state : 0
154 #define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \
155 tcp_trace(TA_USER, ostate, tp, 0, 0, req)
159 #define TCPDEBUG2(req)
/*
 * Some poorly optimized programs try to use TCP_NOPUSH to improve
 * performance and end up with a small amount of data sitting in the
 * send buffer.  That small amount of data will _not_ be pushed into
 * the network until more data is written to the socket or the socket
 * write side is shut down.
 *
 * When this knob is non-zero (the default), setting TCP_NOPUSH has no
 * effect, which avoids the stuck-data problem described above.
 */
static int tcp_disable_nopush = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_nopush, CTLFLAG_RW,
    &tcp_disable_nopush, 0, "TCP_NOPUSH socket option will have no effect");
174 * Allocate socket buffer space.
177 tcp_usr_preattach(struct socket
*so
, int proto __unused
,
178 struct pru_attach_info
*ai
)
182 if (so
->so_snd
.ssb_hiwat
== 0 || so
->so_rcv
.ssb_hiwat
== 0) {
183 error
= soreserve(so
, tcp_sendspace
, tcp_recvspace
,
188 atomic_set_int(&so
->so_rcv
.ssb_flags
, SSB_AUTOSIZE
);
189 atomic_set_int(&so
->so_snd
.ssb_flags
, SSB_AUTOSIZE
| SSB_PREALLOC
);
195 * TCP attaches to socket via pru_attach(), reserving space,
196 * and an internet control block. This socket may move to
197 * other CPU later when we bind/connect.
200 tcp_usr_attach(netmsg_t msg
)
202 struct socket
*so
= msg
->base
.nm_so
;
203 struct pru_attach_info
*ai
= msg
->attach
.nm_ai
;
206 struct tcpcb
*tp
= NULL
;
210 KASSERT(inp
== NULL
, ("tcp socket attached"));
213 error
= tcp_attach(so
, ai
);
217 if ((so
->so_options
& SO_LINGER
) && so
->so_linger
== 0)
218 so
->so_linger
= TCP_LINGERTIME
;
221 TCPDEBUG2(PRU_ATTACH
);
222 lwkt_replymsg(&msg
->lmsg
, error
);
226 * pru_detach() detaches the TCP protocol from the socket.
227 * If the protocol state is non-embryonic, then can't
228 * do this directly: have to initiate a pru_disconnect(),
229 * which may finish later; embryonic TCB's can just
233 tcp_usr_detach(netmsg_t msg
)
235 struct socket
*so
= msg
->base
.nm_so
;
244 * If the inp is already detached or never attached, it may have
245 * been due to an async close or async attach failure. Just return
246 * as if no error occurred.
250 KASSERT(tp
!= NULL
, ("tcp_usr_detach: tp is NULL"));
252 tp
= tcp_disconnect(tp
);
253 TCPDEBUG2(PRU_DETACH
);
255 lwkt_replymsg(&msg
->lmsg
, error
);
259 * NOTE: ignore_error is non-zero for certain disconnection races
260 * which we want to silently allow, otherwise close() may return
261 * an unexpected error.
263 * NOTE: The variables (msg) and (tp) are assumed.
265 #define COMMON_START(so, inp, ignore_error) \
271 error = ignore_error ? 0 : EINVAL; \
275 tp = intotcpcb(inp); \
279 #define COMMON_END1(req, noreply) \
283 lwkt_replymsg(&msg->lmsg, error); \
287 #define COMMON_END(req) COMMON_END1((req), 0)
290 tcp_sosetport(struct lwkt_msg
*msg
, lwkt_port_t port
)
292 sosetport(((struct netmsg_base
*)msg
)->nm_so
, port
);
296 * Give the socket an address.
299 tcp_usr_bind(netmsg_t msg
)
301 struct socket
*so
= msg
->bind
.base
.nm_so
;
302 struct sockaddr
*nam
= msg
->bind
.nm_nam
;
303 struct thread
*td
= msg
->bind
.nm_td
;
307 struct sockaddr_in
*sinp
;
308 lwkt_port_t port0
= netisr_cpuport(0);
310 COMMON_START(so
, inp
, 0);
313 * Must check for multicast addresses and disallow binding
316 sinp
= (struct sockaddr_in
*)nam
;
317 if (sinp
->sin_family
== AF_INET
&&
318 IN_MULTICAST(ntohl(sinp
->sin_addr
.s_addr
))) {
319 error
= EAFNOSUPPORT
;
324 * Check "already bound" here (in_pcbbind() does the same check
325 * though), so we don't forward a connected socket to netisr0,
326 * which would panic in the following in_pcbunlink().
328 if (inp
->inp_lport
!= 0 || inp
->inp_laddr
.s_addr
!= INADDR_ANY
) {
329 error
= EINVAL
; /* already bound */
334 * Use netisr0 to serialize in_pcbbind(), so that pru_detach and
335 * pru_bind for different sockets on the same local port could be
336 * properly ordered. The original race is illustrated here for
341 * close(s1); <----- asynchronous
345 * All will expect bind(s2, *.PORT) to succeed. However, it will
346 * fail, if following sequence happens due to random socket initial
347 * msgport and asynchronous close(2):
351 * : pru_bind(s2) [*.PORT is used by s1]
354 if (&curthread
->td_msgport
!= port0
) {
355 lwkt_msg_t lmsg
= &msg
->bind
.base
.lmsg
;
357 KASSERT((msg
->bind
.nm_flags
& PRUB_RELINK
) == 0,
358 ("already asked to relink"));
360 in_pcbunlink(so
->so_pcb
, &tcbinfo
[mycpuid
]);
361 msg
->bind
.nm_flags
|= PRUB_RELINK
;
363 TCP_STATE_MIGRATE_START(tp
);
365 /* See the related comment in tcp_connect() */
366 lwkt_setmsg_receipt(lmsg
, tcp_sosetport
);
367 lwkt_forwardmsg(port0
, lmsg
);
368 /* msg invalid now */
371 KASSERT(so
->so_port
== port0
, ("so_port is not netisr0"));
373 if (msg
->bind
.nm_flags
& PRUB_RELINK
) {
374 msg
->bind
.nm_flags
&= ~PRUB_RELINK
;
375 TCP_STATE_MIGRATE_END(tp
);
376 in_pcblink(so
->so_pcb
, &tcbinfo
[mycpuid
]);
378 KASSERT(inp
->inp_pcbinfo
== &tcbinfo
[0], ("pcbinfo is not tcbinfo0"));
380 error
= in_pcbbind(inp
, nam
, td
);
384 COMMON_END(PRU_BIND
);
390 tcp6_usr_bind(netmsg_t msg
)
392 struct socket
*so
= msg
->bind
.base
.nm_so
;
393 struct sockaddr
*nam
= msg
->bind
.nm_nam
;
394 struct thread
*td
= msg
->bind
.nm_td
;
398 struct sockaddr_in6
*sin6p
;
400 COMMON_START(so
, inp
, 0);
403 * Must check for multicast addresses and disallow binding
406 sin6p
= (struct sockaddr_in6
*)nam
;
407 if (sin6p
->sin6_family
== AF_INET6
&&
408 IN6_IS_ADDR_MULTICAST(&sin6p
->sin6_addr
)) {
409 error
= EAFNOSUPPORT
;
412 error
= in6_pcbbind(inp
, nam
, td
);
415 COMMON_END(PRU_BIND
);
419 struct netmsg_inswildcard
{
420 struct netmsg_base base
;
421 struct inpcb
*nm_inp
;
425 in_pcbinswildcardhash_handler(netmsg_t msg
)
427 struct netmsg_inswildcard
*nm
= (struct netmsg_inswildcard
*)msg
;
428 int cpu
= mycpuid
, nextcpu
;
430 in_pcbinswildcardhash_oncpu(nm
->nm_inp
, &tcbinfo
[cpu
]);
433 if (nextcpu
< netisr_ncpus
)
434 lwkt_forwardmsg(netisr_cpuport(nextcpu
), &nm
->base
.lmsg
);
436 lwkt_replymsg(&nm
->base
.lmsg
, 0);
440 * Prepare to accept connections.
443 tcp_usr_listen(netmsg_t msg
)
445 struct socket
*so
= msg
->listen
.base
.nm_so
;
446 struct thread
*td
= msg
->listen
.nm_td
;
450 struct netmsg_inswildcard nm
;
451 lwkt_port_t port0
= netisr_cpuport(0);
453 COMMON_START(so
, inp
, 0);
455 if (&curthread
->td_msgport
!= port0
) {
456 lwkt_msg_t lmsg
= &msg
->listen
.base
.lmsg
;
458 KASSERT((msg
->listen
.nm_flags
& PRUL_RELINK
) == 0,
459 ("already asked to relink"));
461 in_pcbunlink(so
->so_pcb
, &tcbinfo
[mycpuid
]);
462 msg
->listen
.nm_flags
|= PRUL_RELINK
;
464 TCP_STATE_MIGRATE_START(tp
);
466 /* See the related comment in tcp_connect() */
467 lwkt_setmsg_receipt(lmsg
, tcp_sosetport
);
468 lwkt_forwardmsg(port0
, lmsg
);
469 /* msg invalid now */
472 KASSERT(so
->so_port
== port0
, ("so_port is not netisr0"));
474 if (msg
->listen
.nm_flags
& PRUL_RELINK
) {
475 msg
->listen
.nm_flags
&= ~PRUL_RELINK
;
476 TCP_STATE_MIGRATE_END(tp
);
477 in_pcblink(so
->so_pcb
, &tcbinfo
[mycpuid
]);
479 KASSERT(inp
->inp_pcbinfo
== &tcbinfo
[0], ("pcbinfo is not tcbinfo0"));
481 if (tp
->t_flags
& TF_LISTEN
)
484 if (inp
->inp_lport
== 0) {
485 error
= in_pcbbind(inp
, NULL
, td
);
490 TCP_STATE_CHANGE(tp
, TCPS_LISTEN
);
491 tp
->t_flags
|= TF_LISTEN
;
492 tp
->tt_msg
= NULL
; /* Catch any invalid timer usage */
495 * Create tcpcb per-cpu port cache
498 * This _must_ be done before installing this inpcb into
501 tcp_pcbport_create(tp
);
503 if (netisr_ncpus
> 1) {
505 * Put this inpcb into wildcard hash on other cpus.
507 ASSERT_INP_NOTINHASH(inp
);
508 netmsg_init(&nm
.base
, NULL
, &curthread
->td_msgport
,
509 MSGF_PRIORITY
, in_pcbinswildcardhash_handler
);
511 lwkt_domsg(netisr_cpuport(1), &nm
.base
.lmsg
, 0);
513 in_pcbinswildcardhash(inp
);
514 COMMON_END(PRU_LISTEN
);
520 tcp6_usr_listen(netmsg_t msg
)
522 struct socket
*so
= msg
->listen
.base
.nm_so
;
523 struct thread
*td
= msg
->listen
.nm_td
;
527 struct netmsg_inswildcard nm
;
529 COMMON_START(so
, inp
, 0);
531 if (tp
->t_flags
& TF_LISTEN
)
534 if (inp
->inp_lport
== 0) {
535 error
= in6_pcbbind(inp
, NULL
, td
);
540 TCP_STATE_CHANGE(tp
, TCPS_LISTEN
);
541 tp
->t_flags
|= TF_LISTEN
;
542 tp
->tt_msg
= NULL
; /* Catch any invalid timer usage */
545 * Create tcpcb per-cpu port cache
548 * This _must_ be done before installing this inpcb into
551 tcp_pcbport_create(tp
);
553 if (netisr_ncpus
> 1) {
555 * Put this inpcb into wildcard hash on other cpus.
557 KKASSERT(so
->so_port
== netisr_cpuport(0));
559 KKASSERT(inp
->inp_pcbinfo
== &tcbinfo
[0]);
560 ASSERT_INP_NOTINHASH(inp
);
562 netmsg_init(&nm
.base
, NULL
, &curthread
->td_msgport
,
563 MSGF_PRIORITY
, in_pcbinswildcardhash_handler
);
565 lwkt_domsg(netisr_cpuport(1), &nm
.base
.lmsg
, 0);
567 in_pcbinswildcardhash(inp
);
568 COMMON_END(PRU_LISTEN
);
573 * Initiate connection to peer.
574 * Create a template for use in transmissions on this connection.
575 * Enter SYN_SENT state, and mark socket as connecting.
576 * Start keep-alive timer, and seed output sequence space.
577 * Send initial segment on connection.
580 tcp_usr_connect(netmsg_t msg
)
582 struct socket
*so
= msg
->connect
.base
.nm_so
;
583 struct sockaddr
*nam
= msg
->connect
.nm_nam
;
584 struct thread
*td
= msg
->connect
.nm_td
;
588 struct sockaddr_in
*sinp
;
590 ASSERT_NETISR_NCPUS(mycpuid
);
592 COMMON_START(so
, inp
, 0);
595 * Must disallow TCP ``connections'' to multicast addresses.
597 sinp
= (struct sockaddr_in
*)nam
;
598 if (sinp
->sin_family
== AF_INET
599 && IN_MULTICAST(ntohl(sinp
->sin_addr
.s_addr
))) {
600 error
= EAFNOSUPPORT
;
604 /* msg is invalid now */
607 if (msg
->connect
.nm_m
) {
608 m_freem(msg
->connect
.nm_m
);
609 msg
->connect
.nm_m
= NULL
;
611 if (msg
->connect
.nm_flags
& PRUC_HELDTD
)
613 if (error
&& (msg
->connect
.nm_flags
& PRUC_ASYNC
)) {
614 so
->so_error
= error
;
615 soisdisconnected(so
);
617 lwkt_replymsg(&msg
->lmsg
, error
);
623 tcp6_usr_connect(netmsg_t msg
)
625 struct socket
*so
= msg
->connect
.base
.nm_so
;
626 struct sockaddr
*nam
= msg
->connect
.nm_nam
;
627 struct thread
*td
= msg
->connect
.nm_td
;
631 struct sockaddr_in6
*sin6p
;
633 ASSERT_NETISR_NCPUS(mycpuid
);
635 COMMON_START(so
, inp
, 0);
638 * Must disallow TCP ``connections'' to multicast addresses.
640 sin6p
= (struct sockaddr_in6
*)nam
;
641 if (sin6p
->sin6_family
== AF_INET6
642 && IN6_IS_ADDR_MULTICAST(&sin6p
->sin6_addr
)) {
643 error
= EAFNOSUPPORT
;
647 if (!prison_remote_ip(td
, nam
)) {
648 error
= EAFNOSUPPORT
; /* Illegal jail IP */
652 /* Reject v4-mapped address */
653 if (IN6_IS_ADDR_V4MAPPED(&sin6p
->sin6_addr
)) {
654 error
= EADDRNOTAVAIL
;
658 inp
->inp_inc
.inc_isipv6
= 1;
660 /* msg is invalid now */
663 if (msg
->connect
.nm_m
) {
664 m_freem(msg
->connect
.nm_m
);
665 msg
->connect
.nm_m
= NULL
;
667 lwkt_replymsg(&msg
->lmsg
, error
);
673 * Initiate disconnect from peer.
674 * If connection never passed embryonic stage, just drop;
675 * else if don't need to let data drain, then can just drop anyways,
676 * else have to begin TCP shutdown process: mark socket disconnecting,
677 * drain unread data, state switch to reflect user close, and
678 * send segment (e.g. FIN) to peer. Socket will be really disconnected
679 * when peer sends FIN and acks ours.
681 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
684 tcp_usr_disconnect(netmsg_t msg
)
686 struct socket
*so
= msg
->disconnect
.base
.nm_so
;
691 COMMON_START(so
, inp
, 1);
692 tp
= tcp_disconnect(tp
);
693 COMMON_END(PRU_DISCONNECT
);
697 * Accept a connection. Essentially all the work is
698 * done at higher levels; just return the address
699 * of the peer, storing through addr.
702 tcp_usr_accept(netmsg_t msg
)
704 struct socket
*so
= msg
->accept
.base
.nm_so
;
705 struct sockaddr
**nam
= msg
->accept
.nm_nam
;
708 struct tcpcb
*tp
= NULL
;
712 if (so
->so_state
& SS_ISDISCONNECTED
) {
713 error
= ECONNABORTED
;
723 in_setpeeraddr(so
, nam
);
724 COMMON_END(PRU_ACCEPT
);
729 tcp6_usr_accept(netmsg_t msg
)
731 struct socket
*so
= msg
->accept
.base
.nm_so
;
732 struct sockaddr
**nam
= msg
->accept
.nm_nam
;
735 struct tcpcb
*tp
= NULL
;
740 if (so
->so_state
& SS_ISDISCONNECTED
) {
741 error
= ECONNABORTED
;
750 in6_setpeeraddr(so
, nam
);
751 COMMON_END(PRU_ACCEPT
);
756 * Mark the connection as being incapable of further output.
759 tcp_usr_shutdown(netmsg_t msg
)
761 struct socket
*so
= msg
->shutdown
.base
.nm_so
;
766 COMMON_START(so
, inp
, 0);
768 tp
= tcp_usrclosed(tp
);
770 error
= tcp_output(tp
);
771 COMMON_END(PRU_SHUTDOWN
);
775 * After a receive, possibly send window update to peer.
778 tcp_usr_rcvd(netmsg_t msg
)
780 struct socket
*so
= msg
->rcvd
.base
.nm_so
;
781 int error
= 0, noreply
= 0;
785 COMMON_START(so
, inp
, 0);
787 if (msg
->rcvd
.nm_pru_flags
& PRUR_ASYNC
) {
789 so_async_rcvd_reply(so
);
793 COMMON_END1(PRU_RCVD
, noreply
);
797 * Do a send by putting data in output queue and updating urgent
798 * marker if URG set. Possibly send more data. Unlike the other
799 * pru_*() routines, the mbuf chains are our responsibility. We
800 * must either enqueue them or free them. The other pru_* routines
801 * generally are caller-frees.
804 tcp_usr_send(netmsg_t msg
)
806 struct socket
*so
= msg
->send
.base
.nm_so
;
807 int flags
= msg
->send
.nm_flags
;
808 struct mbuf
*m
= msg
->send
.nm_m
;
814 KKASSERT(msg
->send
.nm_control
== NULL
);
815 KKASSERT(msg
->send
.nm_addr
== NULL
);
816 KKASSERT((flags
& PRUS_FREEADDR
) == 0);
822 * OOPS! we lost a race, the TCP session got reset after
823 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
824 * network interrupt in the non-critical section of sosend().
827 error
= ECONNRESET
; /* XXX EPIPE? */
837 * This is no longer necessary, since:
838 * - sosendtcp() has already checked it for us
839 * - It does not work with asynchronized send
843 * Don't let too much OOB data build up
845 if (flags
& PRUS_OOB
) {
846 if (ssb_space(&so
->so_snd
) < -512) {
855 * Pump the data into the socket.
858 ssb_appendstream(&so
->so_snd
, m
);
861 if (flags
& PRUS_OOB
) {
863 * According to RFC961 (Assigned Protocols),
864 * the urgent pointer points to the last octet
865 * of urgent data. We continue, however,
866 * to consider it to indicate the first octet
867 * of data past the urgent section.
868 * Otherwise, snd_up should be one lower.
870 tp
->snd_up
= tp
->snd_una
+ so
->so_snd
.ssb_cc
;
871 tp
->t_flags
|= TF_FORCE
;
872 error
= tcp_output(tp
);
873 tp
->t_flags
&= ~TF_FORCE
;
875 if (flags
& PRUS_EOF
) {
877 * Close the send side of the connection after
881 tp
= tcp_usrclosed(tp
);
883 if (tp
!= NULL
&& !tcp_output_pending(tp
)) {
884 if (flags
& PRUS_MORETOCOME
)
885 tp
->t_flags
|= TF_MORETOCOME
;
886 error
= tcp_output_fair(tp
);
887 if (flags
& PRUS_MORETOCOME
)
888 tp
->t_flags
&= ~TF_MORETOCOME
;
891 COMMON_END1((flags
& PRUS_OOB
) ? PRU_SENDOOB
:
892 ((flags
& PRUS_EOF
) ? PRU_SEND_EOF
: PRU_SEND
),
893 (flags
& PRUS_NOREPLY
));
897 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
898 * will sofree() it when we return.
901 tcp_usr_abort(netmsg_t msg
)
903 struct socket
*so
= msg
->abort
.base
.nm_so
;
908 COMMON_START(so
, inp
, 1);
909 tp
= tcp_drop(tp
, ECONNABORTED
);
910 COMMON_END(PRU_ABORT
);
914 * Receive out-of-band data.
917 tcp_usr_rcvoob(netmsg_t msg
)
919 struct socket
*so
= msg
->rcvoob
.base
.nm_so
;
920 struct mbuf
*m
= msg
->rcvoob
.nm_m
;
921 int flags
= msg
->rcvoob
.nm_flags
;
926 COMMON_START(so
, inp
, 0);
927 if ((so
->so_oobmark
== 0 &&
928 (so
->so_state
& SS_RCVATMARK
) == 0) ||
929 so
->so_options
& SO_OOBINLINE
||
930 tp
->t_oobflags
& TCPOOB_HADDATA
) {
934 if ((tp
->t_oobflags
& TCPOOB_HAVEDATA
) == 0) {
939 *mtod(m
, caddr_t
) = tp
->t_iobc
;
940 if ((flags
& MSG_PEEK
) == 0)
941 tp
->t_oobflags
^= (TCPOOB_HAVEDATA
| TCPOOB_HADDATA
);
942 COMMON_END(PRU_RCVOOB
);
946 tcp_usr_savefaddr(struct socket
*so
, const struct sockaddr
*faddr
)
948 in_savefaddr(so
, faddr
);
953 tcp6_usr_savefaddr(struct socket
*so
, const struct sockaddr
*faddr
)
955 in6_savefaddr(so
, faddr
);
960 tcp_usr_preconnect(struct socket
*so
, const struct sockaddr
*nam
,
961 struct thread
*td __unused
)
963 const struct sockaddr_in
*sinp
;
965 sinp
= (const struct sockaddr_in
*)nam
;
966 if (sinp
->sin_family
== AF_INET
&&
967 IN_MULTICAST(ntohl(sinp
->sin_addr
.s_addr
)))
974 /* xxx - should be const */
975 struct pr_usrreqs tcp_usrreqs
= {
976 .pru_abort
= tcp_usr_abort
,
977 .pru_accept
= tcp_usr_accept
,
978 .pru_attach
= tcp_usr_attach
,
979 .pru_bind
= tcp_usr_bind
,
980 .pru_connect
= tcp_usr_connect
,
981 .pru_connect2
= pr_generic_notsupp
,
982 .pru_control
= in_control_dispatch
,
983 .pru_detach
= tcp_usr_detach
,
984 .pru_disconnect
= tcp_usr_disconnect
,
985 .pru_listen
= tcp_usr_listen
,
986 .pru_peeraddr
= in_setpeeraddr_dispatch
,
987 .pru_rcvd
= tcp_usr_rcvd
,
988 .pru_rcvoob
= tcp_usr_rcvoob
,
989 .pru_send
= tcp_usr_send
,
990 .pru_sense
= pru_sense_null
,
991 .pru_shutdown
= tcp_usr_shutdown
,
992 .pru_sockaddr
= in_setsockaddr_dispatch
,
993 .pru_sosend
= sosendtcp
,
994 .pru_soreceive
= sorecvtcp
,
995 .pru_savefaddr
= tcp_usr_savefaddr
,
996 .pru_preconnect
= tcp_usr_preconnect
,
997 .pru_preattach
= tcp_usr_preattach
1001 struct pr_usrreqs tcp6_usrreqs
= {
1002 .pru_abort
= tcp_usr_abort
,
1003 .pru_accept
= tcp6_usr_accept
,
1004 .pru_attach
= tcp_usr_attach
,
1005 .pru_bind
= tcp6_usr_bind
,
1006 .pru_connect
= tcp6_usr_connect
,
1007 .pru_connect2
= pr_generic_notsupp
,
1008 .pru_control
= in6_control_dispatch
,
1009 .pru_detach
= tcp_usr_detach
,
1010 .pru_disconnect
= tcp_usr_disconnect
,
1011 .pru_listen
= tcp6_usr_listen
,
1012 .pru_peeraddr
= in6_setpeeraddr_dispatch
,
1013 .pru_rcvd
= tcp_usr_rcvd
,
1014 .pru_rcvoob
= tcp_usr_rcvoob
,
1015 .pru_send
= tcp_usr_send
,
1016 .pru_sense
= pru_sense_null
,
1017 .pru_shutdown
= tcp_usr_shutdown
,
1018 .pru_sockaddr
= in6_setsockaddr_dispatch
,
1019 .pru_sosend
= sosendtcp
,
1020 .pru_soreceive
= sorecvtcp
,
1021 .pru_savefaddr
= tcp6_usr_savefaddr
1026 tcp_connect_oncpu(struct tcpcb
*tp
, int flags
, struct mbuf
*m
,
1027 const struct sockaddr_in
*sin
, struct sockaddr_in
*if_sin
,
1030 struct inpcb
*inp
= tp
->t_inpcb
, *oinp
;
1031 struct socket
*so
= inp
->inp_socket
;
1032 struct route
*ro
= &inp
->inp_route
;
1034 KASSERT(inp
->inp_pcbinfo
== &tcbinfo
[mycpu
->gd_cpuid
],
1035 ("pcbinfo mismatch"));
1037 oinp
= in_pcblookup_hash(inp
->inp_pcbinfo
,
1038 sin
->sin_addr
, sin
->sin_port
,
1039 (inp
->inp_laddr
.s_addr
!= INADDR_ANY
?
1040 inp
->inp_laddr
: if_sin
->sin_addr
),
1041 inp
->inp_lport
, 0, NULL
);
1044 return (EADDRINUSE
);
1046 if (inp
->inp_laddr
.s_addr
== INADDR_ANY
)
1047 inp
->inp_laddr
= if_sin
->sin_addr
;
1048 KASSERT(inp
->inp_faddr
.s_addr
== sin
->sin_addr
.s_addr
,
1049 ("faddr mismatch for reconnect"));
1050 KASSERT(inp
->inp_fport
== sin
->sin_port
,
1051 ("fport mismatch for reconnect"));
1052 in_pcbinsconnhash(inp
);
1054 inp
->inp_flags
|= INP_HASH
;
1055 inp
->inp_hashval
= hash
;
1058 * We are now on the inpcb's owner CPU, if the cached route was
1059 * freed because the rtentry's owner CPU is not the current CPU
1060 * (e.g. in tcp_connect()), then we try to reallocate it here with
1061 * the hope that a rtentry may be cloned from a RTF_PRCLONING
1064 if (!(inp
->inp_socket
->so_options
& SO_DONTROUTE
) && /*XXX*/
1065 ro
->ro_rt
== NULL
) {
1066 bzero(&ro
->ro_dst
, sizeof(struct sockaddr_in
));
1067 ro
->ro_dst
.sa_family
= AF_INET
;
1068 ro
->ro_dst
.sa_len
= sizeof(struct sockaddr_in
);
1069 ((struct sockaddr_in
*)&ro
->ro_dst
)->sin_addr
=
1075 * Now that no more errors can occur, change the protocol processing
1076 * port to the current thread (which is the correct thread).
1078 * Create TCP timer message now; we are on the tcpcb's owner
1081 tcp_create_timermsg(tp
, &curthread
->td_msgport
);
1084 * Compute window scaling to request. Use a larger scaling then
1085 * needed for the initial receive buffer in case the receive buffer
1088 if (tp
->request_r_scale
< TCP_MIN_WINSHIFT
)
1089 tp
->request_r_scale
= TCP_MIN_WINSHIFT
;
1090 while (tp
->request_r_scale
< TCP_MAX_WINSHIFT
&&
1091 (TCP_MAXWIN
<< tp
->request_r_scale
) < so
->so_rcv
.ssb_hiwat
1093 tp
->request_r_scale
++;
1097 tcpstat
.tcps_connattempt
++;
1098 TCP_STATE_CHANGE(tp
, TCPS_SYN_SENT
);
1099 tcp_callout_reset(tp
, tp
->tt_keep
, tp
->t_keepinit
, tcp_timer_keep
);
1100 tp
->iss
= tcp_new_isn(tp
);
1101 tcp_sendseqinit(tp
);
1103 ssb_appendstream(&so
->so_snd
, m
);
1105 if (flags
& PRUS_OOB
)
1106 tp
->snd_up
= tp
->snd_una
+ so
->so_snd
.ssb_cc
;
1110 * Close the send side of the connection after
1111 * the data is sent if flagged.
1113 if ((flags
& (PRUS_OOB
|PRUS_EOF
)) == PRUS_EOF
) {
1115 tp
= tcp_usrclosed(tp
);
1117 return (tcp_output(tp
));
1121 * Common subroutine to open a TCP connection to remote host specified
1122 * by struct sockaddr_in in mbuf *nam. Call in_pcbbind to assign a local
1123 * port number if needed. Call in_pcbladdr to do the routing and to choose
1124 * a local host address (interface).
1125 * Initialize connection parameters and enter SYN-SENT state.
1128 tcp_connect(netmsg_t msg
)
1130 struct socket
*so
= msg
->connect
.base
.nm_so
;
1131 struct sockaddr
*nam
= msg
->connect
.nm_nam
;
1132 struct thread
*td
= msg
->connect
.nm_td
;
1133 struct sockaddr_in
*sin
= (struct sockaddr_in
*)nam
;
1134 struct sockaddr_in
*if_sin
= NULL
;
1141 COMMON_START(so
, inp
, 0);
1144 * Reconnect our pcb if we have to
1146 if (msg
->connect
.nm_flags
& PRUC_RECONNECT
) {
1147 msg
->connect
.nm_flags
&= ~PRUC_RECONNECT
;
1148 TCP_STATE_MIGRATE_END(tp
);
1149 in_pcblink(so
->so_pcb
, &tcbinfo
[mycpu
->gd_cpuid
]);
1151 if (inp
->inp_faddr
.s_addr
!= INADDR_ANY
) {
1153 if (so
->so_state
& SS_ISCONNECTING
)
1157 KASSERT(inp
->inp_fport
== 0, ("invalid fport"));
1161 * Select local port, if it is not yet selected.
1163 if (inp
->inp_lport
== 0) {
1164 KKASSERT(inp
->inp_laddr
.s_addr
== INADDR_ANY
);
1166 error
= in_pcbladdr(inp
, nam
, &if_sin
, td
);
1169 inp
->inp_laddr
.s_addr
= if_sin
->sin_addr
.s_addr
;
1170 msg
->connect
.nm_flags
|= PRUC_HASLADDR
;
1173 * Install faddr/fport earlier, so that when this
1174 * inpcb is installed on to the lport hash, the
1175 * 4-tuple contains correct value.
1177 * NOTE: The faddr/fport will have to be installed
1178 * after the in_pcbladdr(), which may change them.
1180 inp
->inp_faddr
= sin
->sin_addr
;
1181 inp
->inp_fport
= sin
->sin_port
;
1183 error
= in_pcbbind_remote(inp
, nam
, td
);
1188 if ((msg
->connect
.nm_flags
& PRUC_HASLADDR
) == 0) {
1191 * This inpcb was bound before this connect.
1193 error
= in_pcbladdr(inp
, nam
, &if_sin
, td
);
1198 * Save or refresh the faddr/fport, since they may
1199 * be changed by in_pcbladdr().
1201 inp
->inp_faddr
= sin
->sin_addr
;
1202 inp
->inp_fport
= sin
->sin_port
;
1206 KASSERT(inp
->inp_faddr
.s_addr
== sin
->sin_addr
.s_addr
,
1207 ("faddr mismatch for reconnect"));
1208 KASSERT(inp
->inp_fport
== sin
->sin_port
,
1209 ("fport mismatch for reconnect"));
1212 KKASSERT(inp
->inp_socket
== so
);
1214 hash
= tcp_addrhash(sin
->sin_addr
.s_addr
, sin
->sin_port
,
1215 (inp
->inp_laddr
.s_addr
!= INADDR_ANY
?
1216 inp
->inp_laddr
.s_addr
: if_sin
->sin_addr
.s_addr
),
1218 port
= netisr_hashport(hash
);
1220 if (port
!= &curthread
->td_msgport
) {
1221 lwkt_msg_t lmsg
= &msg
->connect
.base
.lmsg
;
1224 * in_pcbladdr() may have allocated a route entry for us
1225 * on the current CPU, but we need a route entry on the
1226 * inpcb's owner CPU, so free it here.
1228 in_pcbresetroute(inp
);
1231 * We are moving the protocol processing port the socket
1232 * is on, we have to unlink here and re-link on the
1235 in_pcbunlink(so
->so_pcb
, &tcbinfo
[mycpu
->gd_cpuid
]);
1236 msg
->connect
.nm_flags
|= PRUC_RECONNECT
;
1237 msg
->connect
.base
.nm_dispatch
= tcp_connect
;
1239 TCP_STATE_MIGRATE_START(tp
);
1242 * Use message put done receipt to change this socket's
1243 * so_port, i.e. _after_ this message was put onto the
1244 * target netisr's msgport but _before_ the message could
1245 * be pulled from the target netisr's msgport, so that:
1246 * - The upper half (socket code) will not see the new
1247 * msgport before this message reaches the new msgport
1248 * and messages for this socket will be ordered.
1249 * - This message will see the new msgport, when its
1250 * handler is called in the target netisr.
1253 * We MUST use message put done receipt to change this
1255 * If we changed the so_port in this netisr after the
1256 * lwkt_forwardmsg (so messages for this socket will be
1257 * ordered) and changed the so_port in the target netisr
1258 * at the very beginning of this message's handler, we
1259 * would suffer so_port overwritten race, given this
1260 * message might be forwarded again.
1263 * This mechanism depends on that the netisr's msgport
1264 * is spin msgport (currently it is :).
1266 * If the upper half saw the new msgport before this
1267 * message reached the target netisr's msgport, the
1268 * messages sent from the upper half could reach the new
1269 * msgport before this message, thus there would be
1270 * message reordering. The worst case could be soclose()
1271 * saw the new msgport and the detach message could reach
1272 * the new msgport before this message, i.e. the inpcb
1273 * could have been destroyed when this message was still
1274 * pending on or on its way to the new msgport. Other
1275 * weird cases could also happen, e.g. inpcb->inp_pcbinfo,
1276 * since we have unlinked this inpcb from the current
1279 lwkt_setmsg_receipt(lmsg
, tcp_sosetport
);
1280 lwkt_forwardmsg(port
, lmsg
);
1281 /* msg invalid now */
1283 } else if (msg
->connect
.nm_flags
& PRUC_HELDTD
) {
1285 * The original thread is no longer needed; release it.
1288 msg
->connect
.nm_flags
&= ~PRUC_HELDTD
;
1290 error
= tcp_connect_oncpu(tp
, msg
->connect
.nm_sndflags
,
1291 msg
->connect
.nm_m
, sin
, if_sin
, hash
);
1292 msg
->connect
.nm_m
= NULL
;
1294 if (msg
->connect
.nm_m
) {
1295 m_freem(msg
->connect
.nm_m
);
1296 msg
->connect
.nm_m
= NULL
;
1298 if (msg
->connect
.nm_flags
& PRUC_HELDTD
)
1300 if (error
&& (msg
->connect
.nm_flags
& PRUC_ASYNC
)) {
1301 so
->so_error
= error
;
1302 soisdisconnected(so
);
1304 lwkt_replymsg(&msg
->connect
.base
.lmsg
, error
);
1305 /* msg invalid now */
/*
 * tcp6_connect: netisr handler that connects an IPv6 TCP socket.
 * Picks a local address for the destination, then either finishes the
 * connect on this cpu via tcp6_connect_oncpu(), or — if the inpcb is
 * owned by another netisr — unlinks the pcb, sets PRUC_RECONNECT and
 * forwards this very message to the owner cpu so this function is
 * re-run there.  Replies to the message with the final error code.
 *
 * NOTE(review): the extraction of this file has dropped blank lines,
 * braces, and some error-check lines; only comments were added below,
 * every surviving code line is byte-identical.
 */
1311 tcp6_connect(netmsg_t msg
)
/* Unpack the socket, destination sockaddr and originating thread. */
1314 struct socket
*so
= msg
->connect
.base
.nm_so
;
1315 struct sockaddr
*nam
= msg
->connect
.nm_nam
;
1316 struct thread
*td
= msg
->connect
.nm_td
;
1318 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)nam
;
1319 struct in6_addr
*addr6
;
/* COMMON_START sets up the tp/inp locals and validates the socket. */
1323 COMMON_START(so
, inp
, 0);
1326 * Reconnect our pcb if we have to
/*
 * Second pass after a cross-cpu forward: clear the flag, end the
 * tcpcb state migration and relink the inpcb on this (owner) cpu.
 */
1328 if (msg
->connect
.nm_flags
& PRUC_RECONNECT
) {
1329 msg
->connect
.nm_flags
&= ~PRUC_RECONNECT
;
1330 TCP_STATE_MIGRATE_END(tp
);
1331 in_pcblink(so
->so_pcb
, &tcbinfo
[mycpu
->gd_cpuid
]);
1335 * Bind if we have to
/* Implicit bind: pick an ephemeral local port if none is set yet. */
1337 if (inp
->inp_lport
== 0) {
1338 error
= in6_pcbbind(inp
, NULL
, td
);
1344 * Cannot simply call in_pcbconnect, because there might be an
1345 * earlier incarnation of this same connection still in
1346 * TIME_WAIT state, creating an ADDRINUSE error.
/* Select the local IPv6 source address for this destination. */
1348 error
= in6_pcbladdr(inp
, nam
, &addr6
, td
);
1352 port
= tcp6_addrport(); /* XXX hack for now, always cpu0 */
/*
 * Not on the inpcb's owner netisr: drop the cpu-local route,
 * unlink the pcb, mark the message for reconnect and forward it.
 * The message belongs to the target cpu after lwkt_forwardmsg().
 */
1354 if (port
!= &curthread
->td_msgport
) {
1355 lwkt_msg_t lmsg
= &msg
->connect
.base
.lmsg
;
1358 * in_pcbladdr() may have allocated a route entry for us
1359 * on the current CPU, but we need a route entry on the
1360 * inpcb's owner CPU, so free it here.
1362 in_pcbresetroute(inp
);
1364 in_pcbunlink(so
->so_pcb
, &tcbinfo
[mycpu
->gd_cpuid
]);
1365 msg
->connect
.nm_flags
|= PRUC_RECONNECT
;
1366 msg
->connect
.base
.nm_dispatch
= tcp6_connect
;
1368 TCP_STATE_MIGRATE_START(tp
);
1370 /* See the related comment in tcp_connect() */
1371 lwkt_setmsg_receipt(lmsg
, tcp_sosetport
);
1372 lwkt_forwardmsg(port
, lmsg
);
1373 /* msg invalid now */
/* Already on the owner cpu: finish the connect here. */
1376 error
= tcp6_connect_oncpu(tp
, msg
->connect
.nm_sndflags
,
1377 &msg
->connect
.nm_m
, sin6
, addr6
);
1378 /* nm_m may still be intact */
/* On failure the mbuf may not have been consumed; free it here. */
1380 if (msg
->connect
.nm_m
) {
1381 m_freem(msg
->connect
.nm_m
);
1382 msg
->connect
.nm_m
= NULL
;
/* Reply with the final status; the message is invalid afterwards. */
1384 lwkt_replymsg(&msg
->connect
.base
.lmsg
, error
);
1385 /* msg invalid now */
/*
 * tcp6_connect_oncpu: finish an IPv6 connect on the inpcb's owner cpu.
 * Rejects a duplicate of a connection still in the pcb hash
 * (EADDRINUSE), fills in the local/foreign address pair, computes the
 * window scale to request, enters SYN_SENT, queues any user data and
 * emits the initial segment via tcp_output().
 *
 * NOTE(review): extraction dropped braces and some guard lines; only
 * comments were added, code lines are untouched.
 */
1389 tcp6_connect_oncpu(struct tcpcb
*tp
, int flags
, struct mbuf
**mp
,
1390 struct sockaddr_in6
*sin6
, struct in6_addr
*addr6
)
1392 struct mbuf
*m
= *mp
;
1393 struct inpcb
*inp
= tp
->t_inpcb
;
1394 struct socket
*so
= inp
->inp_socket
;
1398 * Cannot simply call in_pcbconnect, because there might be an
1399 * earlier incarnation of this same connection still in
1400 * TIME_WAIT state, creating an ADDRINUSE error.
/*
 * Look for an existing pcb with the identical 4-tuple; if the local
 * address is still unspecified, probe with the freshly chosen addr6.
 */
1402 oinp
= in6_pcblookup_hash(inp
->inp_pcbinfo
,
1403 &sin6
->sin6_addr
, sin6
->sin6_port
,
1404 (IN6_IS_ADDR_UNSPECIFIED(&inp
->in6p_laddr
) ?
1405 addr6
: &inp
->in6p_laddr
),
1406 inp
->inp_lport
, 0, NULL
);
1408 return (EADDRINUSE
);
/* Commit the local/foreign addresses and flow info, then hash. */
1410 if (IN6_IS_ADDR_UNSPECIFIED(&inp
->in6p_laddr
))
1411 inp
->in6p_laddr
= *addr6
;
1412 inp
->in6p_faddr
= sin6
->sin6_addr
;
1413 inp
->inp_fport
= sin6
->sin6_port
;
1414 if ((sin6
->sin6_flowinfo
& IPV6_FLOWINFO_MASK
) != 0)
1415 inp
->in6p_flowinfo
= sin6
->sin6_flowinfo
;
1416 in_pcbinsconnhash(inp
);
1419 * Now that no more errors can occur, change the protocol processing
1420 * port to the current thread (which is the correct thread).
1422 * Create TCP timer message now; we are on the tcpcb's owner
1425 tcp_create_timermsg(tp
, &curthread
->td_msgport
);
1427 /* Compute window scaling to request. */
/* Grow request_r_scale until TCP_MAXWIN << scale covers ssb_hiwat. */
1428 if (tp
->request_r_scale
< TCP_MIN_WINSHIFT
)
1429 tp
->request_r_scale
= TCP_MIN_WINSHIFT
;
1430 while (tp
->request_r_scale
< TCP_MAX_WINSHIFT
&&
1431 (TCP_MAXWIN
<< tp
->request_r_scale
) < so
->so_rcv
.ssb_hiwat
) {
1432 tp
->request_r_scale
++;
/* Active open: count it, enter SYN_SENT, arm the keepalive timer. */
1436 tcpstat
.tcps_connattempt
++;
1437 TCP_STATE_CHANGE(tp
, TCPS_SYN_SENT
);
1438 tcp_callout_reset(tp
, tp
->tt_keep
, tp
->t_keepinit
, tcp_timer_keep
);
/* Pick the initial send sequence number and init send seq vars. */
1439 tp
->iss
= tcp_new_isn(tp
);
1440 tcp_sendseqinit(tp
);
/* Queue any user-supplied data onto the send buffer. */
1442 ssb_appendstream(&so
->so_snd
, m
);
1444 if (flags
& PRUS_OOB
)
1445 tp
->snd_up
= tp
->snd_una
+ so
->so_snd
.ssb_cc
;
1449 * Close the send side of the connection after
1450 * the data is sent if flagged.
1452 if ((flags
& (PRUS_OOB
|PRUS_EOF
)) == PRUS_EOF
) {
1454 tp
= tcp_usrclosed(tp
);
/* Transmit the SYN (and any queued data/FIN) now. */
1456 return (tcp_output(tp
));
1462 * The new sockopt interface makes it possible for us to block in the
1463 * copyin/out step (if we take a page fault). Taking a page fault while
1464 * in a critical section is probably a Bad Thing. (Since sockets and pcbs
1465 * both now use TSM, there probably isn't any need for this function to
1466 * run in a critical section any more. This needs more examination.)
/*
 * tcp_ctloutput: netisr handler for get/setsockopt on TCP sockets.
 * Handles the SO_CPUHINT query, punts non-TCP levels (rejecting IP
 * multicast options, dispatching IPv6 levels to ip6_ctloutput), and
 * services the IPPROTO_TCP options in SOPT_SET / SOPT_GET switches.
 * Always replies to the message with the resulting error code.
 *
 * NOTE(review): extraction dropped many case labels, breaks and
 * braces; only comments were added, code lines are untouched.
 */
1469 tcp_ctloutput(netmsg_t msg
)
1471 struct socket
*so
= msg
->base
.nm_so
;
1472 struct sockopt
*sopt
= msg
->ctloutput
.nm_sopt
;
1473 struct thread
*td
= NULL
;
1474 int error
, opt
, optval
, opthz
;
/* The message may carry a held thread reference (see tcp_ctloutmsg). */
1478 if (msg
->ctloutput
.nm_flags
& PRCO_HELDTD
)
1487 tp
= intotcpcb(inp
);
1489 /* Get socket's owner cpuid hint */
1490 if (sopt
->sopt_level
== SOL_SOCKET
&&
1491 sopt
->sopt_dir
== SOPT_GET
&&
1492 sopt
->sopt_name
== SO_CPUHINT
) {
1493 if (tp
->t_flags
& TF_LISTEN
) {
1495 * Listen sockets owner cpuid is always 0,
1496 * which does not make sense if SO_REUSEPORT
1499 * NOTE: inp_lgrpindex is _not_ assigned in jail.
/* Derive the hint from the load group index for SO_REUSEPORT. */
1501 if ((so
->so_options
& SO_REUSEPORT
) &&
1502 inp
->inp_lgrpindex
>= 0)
1503 optval
= inp
->inp_lgrpindex
% netisr_ncpus
;
1505 optval
= -1; /* no hint */
1509 soopt_from_kbuf(sopt
, &optval
, sizeof(optval
));
/*
 * Non-TCP levels: IP multicast options make no sense on a TCP
 * socket; IPv6-family sockets are dispatched to ip6_ctloutput.
 */
1513 if (sopt
->sopt_level
!= IPPROTO_TCP
) {
1514 if (sopt
->sopt_level
== IPPROTO_IP
) {
1515 switch (sopt
->sopt_name
) {
1516 case IP_MULTICAST_IF
:
1517 case IP_MULTICAST_VIF
:
1518 case IP_MULTICAST_TTL
:
1519 case IP_MULTICAST_LOOP
:
1520 case IP_ADD_MEMBERSHIP
:
1521 case IP_DROP_MEMBERSHIP
:
1523 * Multicast does not make sense on
1531 if (INP_CHECK_SOCKAF(so
, AF_INET6
))
1532 ip6_ctloutput_dispatch(msg
);
1536 /* msg invalid now */
/* IPPROTO_TCP options proper, split on set vs. get direction. */
1542 switch (sopt
->sopt_dir
) {
/* SOPT_SET: copy the integer option value in from the caller. */
1544 error
= soopt_to_kbuf(sopt
, &optval
, sizeof optval
,
1548 switch (sopt
->sopt_name
) {
/* Keepalive idle reverts to interval / global default per option. */
1551 tp
->t_keepidle
= tp
->t_keepintvl
;
1553 tp
->t_keepidle
= tcp_keepidle
;
1554 tcp_timer_keep_activity(tp
, 0);
1556 #ifdef TCP_SIGNATURE
1557 case TCP_SIGNATURE_ENABLE
:
1558 if (tp
->t_state
== TCPS_CLOSED
) {
1560 * This is the only safe state that this
1561 * option could be changed. Some segments
1562 * could already have been sent in other
1566 tp
->t_flags
|= TF_SIGNATURE
;
1568 tp
->t_flags
&= ~TF_SIGNATURE
;
1573 #endif /* TCP_SIGNATURE */
/* Simple boolean flag options: map the name to a t_flags bit. */
1576 switch (sopt
->sopt_name
) {
1584 opt
= 0; /* dead code to fool gcc */
1591 tp
->t_flags
&= ~opt
;
/* TCP_NOPUSH: honor the global disable knob, flush on clear. */
1595 if (tcp_disable_nopush
)
1598 tp
->t_flags
|= TF_NOPUSH
;
1600 tp
->t_flags
&= ~TF_NOPUSH
;
1601 error
= tcp_output(tp
);
1607 * Must be between 0 and maxseg. If the requested
1608 * maxseg is too small to satisfy the desired minmss,
1609 * pump it up (silently so sysctl modifications of
1610 * minmss do not create unexpected program failures).
1611 * Handle degenerate cases.
1613 if (optval
> 0 && optval
<= tp
->t_maxseg
) {
1614 if (optval
+ 40 < tcp_minmss
) {
1615 optval
= tcp_minmss
- 40;
1619 tp
->t_maxseg
= optval
;
/* Keepalive timers: validate range, convert seconds to ticks. */
1628 if (optval
< 1 || optval
> MAXKEEPALIVE
) {
1632 opthz
= optval
* hz
;
1634 switch (sopt
->sopt_name
) {
1636 tp
->t_keepinit
= opthz
;
1639 tp
->t_keepidle
= opthz
;
1640 tcp_timer_keep_activity(tp
, 0);
1643 tp
->t_keepintvl
= opthz
;
/* t_maxidle tracks interval * probe count; keep it in sync. */
1644 tp
->t_maxidle
= tp
->t_keepintvl
* tp
->t_keepcnt
;
1650 if (optval
< 1 || optval
> MAXKEEPCNT
) {
1654 tp
->t_keepcnt
= optval
;
1655 tp
->t_maxidle
= tp
->t_keepintvl
* tp
->t_keepcnt
;
1659 error
= ENOPROTOOPT
;
/* SOPT_GET: read the current value into optval, copy out below. */
1665 switch (sopt
->sopt_name
) {
1666 #ifdef TCP_SIGNATURE
1667 case TCP_SIGNATURE_ENABLE
:
1668 optval
= (tp
->t_flags
& TF_SIGNATURE
) ? 1 : 0;
1670 #endif /* TCP_SIGNATURE */
1672 optval
= tp
->t_flags
& TF_NODELAY
;
1675 optval
= tp
->t_maxseg
;
1678 optval
= tp
->t_flags
& TF_NOOPT
;
1681 optval
= tp
->t_flags
& TF_NOPUSH
;
/* Timer values are reported back in seconds (ticks / hz). */
1684 optval
= tp
->t_keepinit
/ hz
;
1687 optval
= tp
->t_keepidle
/ hz
;
1690 optval
= tp
->t_keepintvl
/ hz
;
1693 optval
= tp
->t_keepcnt
;
1696 error
= ENOPROTOOPT
;
1700 soopt_from_kbuf(sopt
, &optval
, sizeof optval
);
/* Reply with the final status; the message is invalid afterwards. */
1706 lwkt_replymsg(&msg
->lmsg
, error
);
/*
 * Carrier for an asynchronous tcp_ctloutput request: the ctloutput
 * netmsg plus an embedded copy of the caller's sockopt, so the
 * request can outlive the issuing syscall (allocated in
 * tcp_ctloutmsg(), consumed by tcp_ctloutput()).
 */
1709 struct netmsg_tcp_ctloutput
{
1710 struct netmsg_pr_ctloutput ctloutput
;
/* snapshot of the caller's sockopt; sopt_val is redirected into msg */
1711 struct sockopt sopt
;
1716 * Allocate netmsg_pr_ctloutput for asynchronous tcp_ctloutput.
/*
 * Returns a heap-allocated ctloutput message for options that may be
 * set asynchronously, or (per the fallback comment below) lets the
 * caller use the synchronous path when allocation or copyin fails.
 * Holds the calling thread if the sockopt references one, and flags
 * that with PRCO_HELDTD so tcp_ctloutput() can release it.
 */
1718 struct netmsg_pr_ctloutput
*
1719 tcp_ctloutmsg(struct sockopt
*sopt
)
1721 struct netmsg_tcp_ctloutput
*msg
;
1722 int flags
= 0, error
;
1724 KASSERT(sopt
->sopt_dir
== SOPT_SET
, ("not from ctloutput"));
1726 /* Only small set of options allows asynchronous setting. */
1727 if (sopt
->sopt_level
!= IPPROTO_TCP
)
1729 switch (sopt
->sopt_name
) {
/* M_NULLOK: allocation may fail; caller falls back to sync path. */
1739 msg
= kmalloc(sizeof(*msg
), M_LWKTMSG
, M_WAITOK
| M_NULLOK
);
1741 /* Fallback to synchronous tcp_ctloutput */
1745 /* Save the sockopt */
1748 /* Fixup the sopt.sopt_val ptr */
/* Copy the option value into the message's embedded sopt_val. */
1749 error
= sooptcopyin(sopt
, &msg
->sopt_val
,
1750 sizeof(msg
->sopt_val
), sizeof(msg
->sopt_val
));
/* Copyin failed: free the message and fall back (sync path). */
1752 kfree(msg
, M_LWKTMSG
);
/* Point the saved sockopt at the message-owned value storage. */
1755 msg
->sopt
.sopt_val
= &msg
->sopt_val
;
1757 /* Hold the current thread */
1758 if (msg
->sopt
.sopt_td
!= NULL
) {
1759 flags
|= PRCO_HELDTD
;
1760 lwkt_hold(msg
->sopt
.sopt_td
);
1763 msg
->ctloutput
.nm_flags
= flags
;
1764 msg
->ctloutput
.nm_sopt
= &msg
->sopt
;
1766 return &msg
->ctloutput
;
1770 * tcp_sendspace and tcp_recvspace are the default send and receive window
1771 * sizes, respectively. These are obsolescent (this information should
1772 * be set by the route).
1774 * Use a default that does not require tcp window scaling to be turned
1775 * on. Individual programs or the administrator can increase the default.
/*
 * NOTE(review): the variables are u_long but are exported with
 * SYSCTL_INT, so the sysctl layer treats them as 32-bit ints.  This
 * works for values < 2^31 on little-endian machines but is a latent
 * type-width mismatch — consider SYSCTL_ULONG (left unchanged here
 * because the sysctl node type is user-visible ABI; verify first).
 */
1777 u_long tcp_sendspace
= 57344; /* largest multiple of PAGE_SIZE < 64k */
1778 SYSCTL_INT(_net_inet_tcp
, TCPCTL_SENDSPACE
, sendspace
, CTLFLAG_RW
,
1779 &tcp_sendspace
, 0, "Maximum outgoing TCP datagram size");
1780 u_long tcp_recvspace
= 57344; /* largest multiple of PAGE_SIZE < 64k */
1781 SYSCTL_INT(_net_inet_tcp
, TCPCTL_RECVSPACE
, recvspace
, CTLFLAG_RW
,
1782 &tcp_recvspace
, 0, "Maximum incoming TCP datagram size");
1785 * Attach TCP protocol to socket, allocating internet protocol control
1786 * block, tcp control block, buffer space, and entering CLOSED state.
/*
 * NOTE(review): extraction dropped several body lines (error returns,
 * braces); only comments were added, code lines are untouched.
 */
1789 tcp_attach(struct socket
*so
, struct pru_attach_info
*ai
)
1795 boolean_t isipv6
= INP_CHECK_SOCKAF(so
, AF_INET6
);
/* Reserve socket buffer space etc. before allocating the pcb. */
1799 error
= tcp_usr_preattach(so
, 0 /* don't care */, ai
);
1803 /* Post attach; do nothing */
/* Allocate the inpcb in this cpu's tcbinfo. */
1806 cpu
= mycpu
->gd_cpuid
;
1809 * Set the default pcbinfo. This will likely change when we
1812 error
= in_pcballoc(so
, &tcbinfo
[cpu
]);
/* IPv6 socket: hop limit -1 selects the kernel default. */
1818 inp
->in6p_hops
= -1; /* use kernel default */
1821 /* Keep a reference for asynchronized pru_rcvd */
1827 * Initiate (or continue) disconnect.
1828 * If embryonic state, just send reset (once).
1829 * If in ``let data drain'' option and linger null, just drop.
1830 * Otherwise (hard), mark socket disconnecting and drop
1831 * current input data; switch states based on user close, and
1832 * send segment to peer (with FIN).
1834 static struct tcpcb
*
1835 tcp_disconnect(struct tcpcb
*tp
)
1837 struct socket
*so
= tp
->t_inpcb
->inp_socket
;
/* Pre-ESTABLISHED connection: the embryonic case (drop/reset). */
1839 if (tp
->t_state
< TCPS_ESTABLISHED
) {
/* SO_LINGER with zero linger time: hard drop, no FIN exchange. */
1841 } else if ((so
->so_options
& SO_LINGER
) && so
->so_linger
== 0) {
1842 tp
= tcp_drop(tp
, 0);
/*
 * Graceful close: under the receive-buffer token, mark the socket
 * disconnecting, discard pending input and run the user-close
 * state transition (tcp_usrclosed may return NULL if tp is gone).
 */
1844 lwkt_gettoken(&so
->so_rcv
.ssb_token
);
1845 soisdisconnecting(so
);
1846 sbflush(&so
->so_rcv
.sb
);
1847 tp
= tcp_usrclosed(tp
);
1850 lwkt_reltoken(&so
->so_rcv
.ssb_token
);
1856 * User issued close, and wish to trail through shutdown states:
1857 * if never received SYN, just forget it. If got a SYN from peer,
1858 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
1859 * If already got a FIN from peer, then almost done; go to LAST_ACK
1860 * state. In all other cases, have already sent FIN to peer (e.g.
1861 * after PRU_SHUTDOWN), and just have to play tedious game waiting
1862 * for peer to send FIN or not respond to keep-alives, etc.
1863 * We can let the user exit from the close as soon as the FIN is acked.
1865 static struct tcpcb
*
1866 tcp_usrclosed(struct tcpcb
*tp
)
1869 switch (tp
->t_state
) {
1873 TCP_STATE_CHANGE(tp
, TCPS_CLOSED
);
1878 case TCPS_SYN_RECEIVED
:
1879 tp
->t_flags
|= TF_NEEDFIN
;
1882 case TCPS_ESTABLISHED
:
1883 TCP_STATE_CHANGE(tp
, TCPS_FIN_WAIT_1
);
1886 case TCPS_CLOSE_WAIT
:
1887 TCP_STATE_CHANGE(tp
, TCPS_LAST_ACK
);
1890 if (tp
&& tp
->t_state
>= TCPS_FIN_WAIT_2
) {
1891 soisdisconnected(tp
->t_inpcb
->inp_socket
);
1892 /* To prevent the connection hanging in FIN_WAIT_2 forever. */
1893 if (tp
->t_state
== TCPS_FIN_WAIT_2
) {
1894 tcp_callout_reset(tp
, tp
->tt_2msl
, tp
->t_maxidle
,