tcp: Make setsockopt(2) TCP_{NODELAY,NOPUSH,NOOPT,FASTKEEP} asynchronous
[dragonfly.git] / sys / netinet / tcp_usrreq.c
blob 5c7f9a7c78fa3abdcd679f7a9987080aedc3d502
1 /*
2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1988, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
62 * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94
63 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.17 2002/10/11 11:46:44 ume Exp $
66 #include "opt_ipsec.h"
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_tcpdebug.h"
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/sysctl.h>
76 #include <sys/globaldata.h>
77 #include <sys/thread.h>
79 #include <sys/mbuf.h>
80 #ifdef INET6
81 #include <sys/domain.h>
82 #endif /* INET6 */
83 #include <sys/socket.h>
84 #include <sys/socketvar.h>
85 #include <sys/socketops.h>
86 #include <sys/protosw.h>
88 #include <sys/thread2.h>
89 #include <sys/msgport2.h>
90 #include <sys/socketvar2.h>
92 #include <net/if.h>
93 #include <net/netisr.h>
94 #include <net/route.h>
96 #include <net/netmsg2.h>
97 #include <net/netisr2.h>
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
101 #ifdef INET6
102 #include <netinet/ip6.h>
103 #endif
104 #include <netinet/in_pcb.h>
105 #ifdef INET6
106 #include <netinet6/in6_pcb.h>
107 #endif
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
110 #ifdef INET6
111 #include <netinet6/ip6_var.h>
112 #include <netinet6/tcp6_var.h>
113 #endif
114 #include <netinet/tcp.h>
115 #include <netinet/tcp_fsm.h>
116 #include <netinet/tcp_seq.h>
117 #include <netinet/tcp_timer.h>
118 #include <netinet/tcp_timer2.h>
119 #include <netinet/tcp_var.h>
120 #include <netinet/tcpip.h>
121 #ifdef TCPDEBUG
122 #include <netinet/tcp_debug.h>
123 #endif
125 #ifdef IPSEC
126 #include <netinet6/ipsec.h>
127 #endif /*IPSEC*/
130 * TCP protocol interface to socket abstraction.
132 extern char *tcpstates[]; /* XXX ??? */
134 static int tcp_attach (struct socket *, struct pru_attach_info *);
135 static void tcp_connect (netmsg_t msg);
136 #ifdef INET6
137 static void tcp6_connect (netmsg_t msg);
138 static int tcp6_connect_oncpu(struct tcpcb *tp, int flags,
139 struct mbuf **mp,
140 struct sockaddr_in6 *sin6,
141 struct in6_addr *addr6);
142 #endif /* INET6 */
143 static struct tcpcb *
144 tcp_disconnect (struct tcpcb *);
145 static struct tcpcb *
146 tcp_usrclosed (struct tcpcb *);
148 #ifdef TCPDEBUG
149 #define TCPDEBUG0 int ostate = 0
150 #define TCPDEBUG1() ostate = tp ? tp->t_state : 0
151 #define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \
152 tcp_trace(TA_USER, ostate, tp, 0, 0, req)
153 #else
154 #define TCPDEBUG0
155 #define TCPDEBUG1()
156 #define TCPDEBUG2(req)
157 #endif
159 static int tcp_lport_extension = 1;
160 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lportext, CTLFLAG_RW,
161 &tcp_lport_extension, 0, "");
164 * Some poorly optimized programs try to use TCP_NOPUSH to improve
165 * performance and end up leaving a small amount of data sitting in
166 * the send buffer.  That data will _not_ be pushed into the network
167 * until more data is written to the socket or the socket's write
168 * side is shut down.
170 static int tcp_disable_nopush = 1;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_nopush, CTLFLAG_RW,
172 &tcp_disable_nopush, 0, "TCP_NOPUSH socket option will have no effect");
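/*
 * Illustrative userland sketch (compiled out below): how TCP_NOPUSH is
 * typically used to coalesce a header and body, assuming only the
 * standard sockets API.  Clearing the option triggers tcp_output() per
 * the TCP_NOPUSH case in tcp_ctloutput() below; with
 * net.inet.tcp.disable_nopush set, the option is silently ignored.
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

static int
send_with_nopush(int s, const void *hdr, size_t hdrlen,
    const void *body, size_t bodylen)
{
	int on = 1, off = 0;

	if (setsockopt(s, IPPROTO_TCP, TCP_NOPUSH, &on, sizeof(on)) < 0)
		return (-1);
	if (write(s, hdr, hdrlen) < 0 || write(s, body, bodylen) < 0)
		return (-1);
	/* Clearing TCP_NOPUSH pushes whatever is still queued. */
	if (setsockopt(s, IPPROTO_TCP, TCP_NOPUSH, &off, sizeof(off)) < 0)
		return (-1);
	return (0);
}
#endif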
175 * Allocate socket buffer space.
177 static int
178 tcp_usr_preattach(struct socket *so, int proto __unused,
179 struct pru_attach_info *ai)
181 int error;
183 if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
184 error = soreserve(so, tcp_sendspace, tcp_recvspace,
185 ai->sb_rlimit);
186 if (error)
187 return (error);
189 atomic_set_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE);
190 atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE | SSB_PREALLOC);
192 return 0;
196 * TCP attaches to the socket via pru_attach(), reserving buffer space
197 * and an internet control block.  The socket may move to another CPU
198 * later when we bind/connect.
200 static void
201 tcp_usr_attach(netmsg_t msg)
203 struct socket *so = msg->base.nm_so;
204 struct pru_attach_info *ai = msg->attach.nm_ai;
205 int error;
206 struct inpcb *inp;
207 struct tcpcb *tp = NULL;
208 TCPDEBUG0;
210 inp = so->so_pcb;
211 KASSERT(inp == NULL, ("tcp socket attached"));
212 TCPDEBUG1();
214 error = tcp_attach(so, ai);
215 if (error)
216 goto out;
218 if ((so->so_options & SO_LINGER) && so->so_linger == 0)
219 so->so_linger = TCP_LINGERTIME;
220 tp = sototcpcb(so);
221 out:
222 TCPDEBUG2(PRU_ATTACH);
223 lwkt_replymsg(&msg->lmsg, error);
227 * pru_detach() detaches the TCP protocol from the socket.
228 * If the protocol state is non-embryonic, then can't
229 * do this directly: have to initiate a pru_disconnect(),
230 * which may finish later; embryonic TCB's can just
231 * be discarded here.
233 static void
234 tcp_usr_detach(netmsg_t msg)
236 struct socket *so = msg->base.nm_so;
237 int error = 0;
238 struct inpcb *inp;
239 struct tcpcb *tp;
240 TCPDEBUG0;
242 inp = so->so_pcb;
245 * If the inp is already detached or was never attached, it may have
246 * been due to an async close or an async attach failure.  Just return
247 * as if no error occurred.
249 if (inp) {
250 tp = intotcpcb(inp);
251 KASSERT(tp != NULL, ("tcp_usr_detach: tp is NULL"));
252 TCPDEBUG1();
253 tp = tcp_disconnect(tp);
254 TCPDEBUG2(PRU_DETACH);
256 lwkt_replymsg(&msg->lmsg, error);
260 * NOTE: ignore_error is non-zero for certain disconnection races
261 * which we want to silently allow, otherwise close() may return
262 * an unexpected error.
264 * NOTE: The macros assume the variables (msg) and (tp) are in scope.
266 #define COMMON_START(so, inp, ignore_error) \
267 TCPDEBUG0; \
269 inp = so->so_pcb; \
270 do { \
271 if (inp == NULL) { \
272 error = ignore_error ? 0 : EINVAL; \
273 tp = NULL; \
274 goto out; \
276 tp = intotcpcb(inp); \
277 TCPDEBUG1(); \
278 } while(0)
280 #define COMMON_END1(req, noreply) \
281 out: do { \
282 TCPDEBUG2(req); \
283 if (!(noreply)) \
284 lwkt_replymsg(&msg->lmsg, error); \
285 return; \
286 } while(0)
288 #define COMMON_END(req) COMMON_END1((req), 0)
290 static void
291 tcp_sosetport(struct lwkt_msg *msg, lwkt_port_t port)
293 sosetport(((struct netmsg_base *)msg)->nm_so, port);
297 * Give the socket an address.
299 static void
300 tcp_usr_bind(netmsg_t msg)
302 struct socket *so = msg->bind.base.nm_so;
303 struct sockaddr *nam = msg->bind.nm_nam;
304 struct thread *td = msg->bind.nm_td;
305 int error = 0;
306 struct inpcb *inp;
307 struct tcpcb *tp;
308 struct sockaddr_in *sinp;
309 lwkt_port_t port0 = netisr_cpuport(0);
311 COMMON_START(so, inp, 0);
314 * Must check for multicast addresses and disallow binding
315 * to them.
317 sinp = (struct sockaddr_in *)nam;
318 if (sinp->sin_family == AF_INET &&
319 IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
320 error = EAFNOSUPPORT;
321 goto out;
325 * Check "already bound" here (in_pcbbind() does the same check
326 * though), so we don't forward a connected socket to netisr0,
327 * which would panic in the following in_pcbunlink().
329 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) {
330 error = EINVAL; /* already bound */
331 goto out;
335 * Use netisr0 to serialize in_pcbbind(), so that pru_detach and
336 * pru_bind for different sockets on the same local port could be
337 * properly ordered. The original race is illustrated here for
338 * reference.
340 * s1 = socket();
341 * bind(s1, *.PORT);
342 * close(s1); <----- asynchronous
343 * s2 = socket();
344 * bind(s2, *.PORT);
346 * One would expect bind(s2, *.PORT) to succeed.  However, it will
347 * fail if the following sequence happens, due to the random initial
348 * socket msgport and the asynchronous close(2):
350 * netisrN netisrM
351 * : :
352 * : pru_bind(s2) [*.PORT is used by s1]
353 * pru_detach(s1) :
355 if (&curthread->td_msgport != port0) {
356 lwkt_msg_t lmsg = &msg->bind.base.lmsg;
358 KASSERT((msg->bind.nm_flags & PRUB_RELINK) == 0,
359 ("already asked to relink"));
361 in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
362 msg->bind.nm_flags |= PRUB_RELINK;
364 TCP_STATE_MIGRATE_START(tp);
366 /* See the related comment in tcp_connect() */
367 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
368 lwkt_forwardmsg(port0, lmsg);
369 /* msg invalid now */
370 return;
372 KASSERT(so->so_port == port0, ("so_port is not netisr0"));
374 if (msg->bind.nm_flags & PRUB_RELINK) {
375 msg->bind.nm_flags &= ~PRUB_RELINK;
376 TCP_STATE_MIGRATE_END(tp);
377 in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
379 KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));
381 error = in_pcbbind(inp, nam, td);
382 if (error)
383 goto out;
385 COMMON_END(PRU_BIND);
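/*
 * Illustrative userland sketch (compiled out below): the
 * bind()/close()/bind() sequence from the comment above, as seen from
 * an application.  PORT is a placeholder; only the standard sockets
 * API is assumed.  Serializing pru_bind/pru_detach through netisr0 is
 * what keeps the second bind() from spuriously failing while the first
 * socket's asynchronous close is still pending.
 */
#if 0
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

#define PORT	12345		/* placeholder */

static int
rebind_same_port(void)
{
	struct sockaddr_in sin;
	int s1, s2;

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port = htons(PORT);

	s1 = socket(AF_INET, SOCK_STREAM, 0);
	if (s1 < 0 || bind(s1, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return (-1);
	close(s1);		/* may complete asynchronously */

	s2 = socket(AF_INET, SOCK_STREAM, 0);
	if (s2 < 0 || bind(s2, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return (-1);	/* expected to succeed regardless */
	return (s2);
}
#endif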
388 #ifdef INET6
390 static void
391 tcp6_usr_bind(netmsg_t msg)
393 struct socket *so = msg->bind.base.nm_so;
394 struct sockaddr *nam = msg->bind.nm_nam;
395 struct thread *td = msg->bind.nm_td;
396 int error = 0;
397 struct inpcb *inp;
398 struct tcpcb *tp;
399 struct sockaddr_in6 *sin6p;
401 COMMON_START(so, inp, 0);
404 * Must check for multicast addresses and disallow binding
405 * to them.
407 sin6p = (struct sockaddr_in6 *)nam;
408 if (sin6p->sin6_family == AF_INET6 &&
409 IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
410 error = EAFNOSUPPORT;
411 goto out;
413 error = in6_pcbbind(inp, nam, td);
414 if (error)
415 goto out;
416 COMMON_END(PRU_BIND);
418 #endif /* INET6 */
420 struct netmsg_inswildcard {
421 struct netmsg_base base;
422 struct inpcb *nm_inp;
425 static void
426 in_pcbinswildcardhash_handler(netmsg_t msg)
428 struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
429 int cpu = mycpuid, nextcpu;
431 in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);
433 nextcpu = cpu + 1;
434 if (nextcpu < ncpus2)
435 lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
436 else
437 lwkt_replymsg(&nm->base.lmsg, 0);
441 * Prepare to accept connections.
443 static void
444 tcp_usr_listen(netmsg_t msg)
446 struct socket *so = msg->listen.base.nm_so;
447 struct thread *td = msg->listen.nm_td;
448 int error = 0;
449 struct inpcb *inp;
450 struct tcpcb *tp;
451 struct netmsg_inswildcard nm;
452 lwkt_port_t port0 = netisr_cpuport(0);
454 COMMON_START(so, inp, 0);
456 if (&curthread->td_msgport != port0) {
457 lwkt_msg_t lmsg = &msg->listen.base.lmsg;
459 KASSERT((msg->listen.nm_flags & PRUL_RELINK) == 0,
460 ("already asked to relink"));
462 in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
463 msg->listen.nm_flags |= PRUL_RELINK;
465 TCP_STATE_MIGRATE_START(tp);
467 /* See the related comment in tcp_connect() */
468 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
469 lwkt_forwardmsg(port0, lmsg);
470 /* msg invalid now */
471 return;
473 KASSERT(so->so_port == port0, ("so_port is not netisr0"));
475 if (msg->listen.nm_flags & PRUL_RELINK) {
476 msg->listen.nm_flags &= ~PRUL_RELINK;
477 TCP_STATE_MIGRATE_END(tp);
478 in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
480 KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));
482 if (tp->t_flags & TF_LISTEN)
483 goto out;
485 if (inp->inp_lport == 0) {
486 error = in_pcbbind(inp, NULL, td);
487 if (error)
488 goto out;
491 TCP_STATE_CHANGE(tp, TCPS_LISTEN);
492 tp->t_flags |= TF_LISTEN;
493 tp->tt_msg = NULL; /* Catch any invalid timer usage */
496 * Create tcpcb per-cpu port cache
498 * NOTE:
499 * This _must_ be done before installing this inpcb into
500 * wildcard hash.
502 tcp_pcbport_create(tp);
504 if (ncpus2 > 1) {
506 * Put this inpcb into wildcard hash on other cpus.
508 ASSERT_INP_NOTINHASH(inp);
509 netmsg_init(&nm.base, NULL, &curthread->td_msgport,
510 MSGF_PRIORITY, in_pcbinswildcardhash_handler);
511 nm.nm_inp = inp;
512 lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
514 in_pcbinswildcardhash(inp);
515 COMMON_END(PRU_LISTEN);
518 #ifdef INET6
520 static void
521 tcp6_usr_listen(netmsg_t msg)
523 struct socket *so = msg->listen.base.nm_so;
524 struct thread *td = msg->listen.nm_td;
525 int error = 0;
526 struct inpcb *inp;
527 struct tcpcb *tp;
528 struct netmsg_inswildcard nm;
530 COMMON_START(so, inp, 0);
532 if (tp->t_flags & TF_LISTEN)
533 goto out;
535 if (inp->inp_lport == 0) {
536 error = in6_pcbbind(inp, NULL, td);
537 if (error)
538 goto out;
541 TCP_STATE_CHANGE(tp, TCPS_LISTEN);
542 tp->t_flags |= TF_LISTEN;
543 tp->tt_msg = NULL; /* Catch any invalid timer usage */
546 * Create tcpcb per-cpu port cache
548 * NOTE:
549 * This _must_ be done before installing this inpcb into
550 * wildcard hash.
552 tcp_pcbport_create(tp);
554 if (ncpus2 > 1) {
556 * Put this inpcb into wildcard hash on other cpus.
558 KKASSERT(so->so_port == netisr_cpuport(0));
559 ASSERT_IN_NETISR(0);
560 KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
561 ASSERT_INP_NOTINHASH(inp);
563 netmsg_init(&nm.base, NULL, &curthread->td_msgport,
564 MSGF_PRIORITY, in_pcbinswildcardhash_handler);
565 nm.nm_inp = inp;
566 lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
568 in_pcbinswildcardhash(inp);
569 COMMON_END(PRU_LISTEN);
571 #endif /* INET6 */
574 * Initiate connection to peer.
575 * Create a template for use in transmissions on this connection.
576 * Enter SYN_SENT state, and mark socket as connecting.
577 * Start keep-alive timer, and seed output sequence space.
578 * Send initial segment on connection.
580 static void
581 tcp_usr_connect(netmsg_t msg)
583 struct socket *so = msg->connect.base.nm_so;
584 struct sockaddr *nam = msg->connect.nm_nam;
585 struct thread *td = msg->connect.nm_td;
586 int error = 0;
587 struct inpcb *inp;
588 struct tcpcb *tp;
589 struct sockaddr_in *sinp;
591 COMMON_START(so, inp, 0);
594 * Must disallow TCP ``connections'' to multicast addresses.
596 sinp = (struct sockaddr_in *)nam;
597 if (sinp->sin_family == AF_INET
598 && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
599 error = EAFNOSUPPORT;
600 goto out;
603 if (!prison_remote_ip(td, (struct sockaddr*)sinp)) {
604 error = EAFNOSUPPORT; /* IPv6 only jail */
605 goto out;
608 tcp_connect(msg);
609 /* msg is invalid now */
610 return;
611 out:
612 if (msg->connect.nm_m) {
613 m_freem(msg->connect.nm_m);
614 msg->connect.nm_m = NULL;
616 if (msg->connect.nm_flags & PRUC_HELDTD)
617 lwkt_rele(td);
618 if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
619 so->so_error = error;
620 soisdisconnected(so);
622 lwkt_replymsg(&msg->lmsg, error);
625 #ifdef INET6
627 static void
628 tcp6_usr_connect(netmsg_t msg)
630 struct socket *so = msg->connect.base.nm_so;
631 struct sockaddr *nam = msg->connect.nm_nam;
632 struct thread *td = msg->connect.nm_td;
633 int error = 0;
634 struct inpcb *inp;
635 struct tcpcb *tp;
636 struct sockaddr_in6 *sin6p;
638 COMMON_START(so, inp, 0);
641 * Must disallow TCP ``connections'' to multicast addresses.
643 sin6p = (struct sockaddr_in6 *)nam;
644 if (sin6p->sin6_family == AF_INET6
645 && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
646 error = EAFNOSUPPORT;
647 goto out;
650 if (!prison_remote_ip(td, nam)) {
651 error = EAFNOSUPPORT; /* IPv4 only jail */
652 goto out;
655 /* Reject v4-mapped address */
656 if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
657 error = EADDRNOTAVAIL;
658 goto out;
661 inp->inp_inc.inc_isipv6 = 1;
662 tcp6_connect(msg);
663 /* msg is invalid now */
664 return;
665 out:
666 if (msg->connect.nm_m) {
667 m_freem(msg->connect.nm_m);
668 msg->connect.nm_m = NULL;
670 lwkt_replymsg(&msg->lmsg, error);
673 #endif /* INET6 */
676 * Initiate disconnect from peer.
677 * If connection never passed embryonic stage, just drop;
678 * else if we don't need to let data drain, we can just drop anyway;
679 * else have to begin TCP shutdown process: mark socket disconnecting,
680 * drain unread data, state switch to reflect user close, and
681 * send segment (e.g. FIN) to peer. Socket will be really disconnected
682 * when peer sends FIN and acks ours.
684 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
686 static void
687 tcp_usr_disconnect(netmsg_t msg)
689 struct socket *so = msg->disconnect.base.nm_so;
690 int error = 0;
691 struct inpcb *inp;
692 struct tcpcb *tp;
694 COMMON_START(so, inp, 1);
695 tp = tcp_disconnect(tp);
696 COMMON_END(PRU_DISCONNECT);
700 * Accept a connection. Essentially all the work is
701 * done at higher levels; just return the address
702 * of the peer, storing through addr.
704 static void
705 tcp_usr_accept(netmsg_t msg)
707 struct socket *so = msg->accept.base.nm_so;
708 struct sockaddr **nam = msg->accept.nm_nam;
709 int error = 0;
710 struct inpcb *inp;
711 struct tcpcb *tp = NULL;
712 TCPDEBUG0;
714 inp = so->so_pcb;
715 if (so->so_state & SS_ISDISCONNECTED) {
716 error = ECONNABORTED;
717 goto out;
719 if (inp == NULL) {
720 error = EINVAL;
721 goto out;
724 tp = intotcpcb(inp);
725 TCPDEBUG1();
726 in_setpeeraddr(so, nam);
727 COMMON_END(PRU_ACCEPT);
730 #ifdef INET6
731 static void
732 tcp6_usr_accept(netmsg_t msg)
734 struct socket *so = msg->accept.base.nm_so;
735 struct sockaddr **nam = msg->accept.nm_nam;
736 int error = 0;
737 struct inpcb *inp;
738 struct tcpcb *tp = NULL;
739 TCPDEBUG0;
741 inp = so->so_pcb;
743 if (so->so_state & SS_ISDISCONNECTED) {
744 error = ECONNABORTED;
745 goto out;
747 if (inp == NULL) {
748 error = EINVAL;
749 goto out;
751 tp = intotcpcb(inp);
752 TCPDEBUG1();
753 in6_setpeeraddr(so, nam);
754 COMMON_END(PRU_ACCEPT);
756 #endif /* INET6 */
759 * Mark the connection as being incapable of further output.
761 static void
762 tcp_usr_shutdown(netmsg_t msg)
764 struct socket *so = msg->shutdown.base.nm_so;
765 int error = 0;
766 struct inpcb *inp;
767 struct tcpcb *tp;
769 COMMON_START(so, inp, 0);
770 socantsendmore(so);
771 tp = tcp_usrclosed(tp);
772 if (tp)
773 error = tcp_output(tp);
774 COMMON_END(PRU_SHUTDOWN);
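/*
 * Illustrative userland sketch (compiled out below): what this
 * pru_shutdown looks like from an application.  shutdown(SHUT_WR) ends
 * up here, closing only the send side so the FIN goes out while the
 * peer's remaining data can still be read.  Only the standard sockets
 * API is assumed.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static ssize_t
half_close_and_drain(int s, char *buf, size_t len)
{
	ssize_t n, total = 0;

	if (shutdown(s, SHUT_WR) < 0)	/* reaches tcp_usr_shutdown() */
		return (-1);
	while ((n = read(s, buf, len)) > 0)
		total += n;		/* read until the peer's FIN (EOF) */
	return (n < 0 ? -1 : total);
}
#endif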
778 * After a receive, possibly send window update to peer.
780 static void
781 tcp_usr_rcvd(netmsg_t msg)
783 struct socket *so = msg->rcvd.base.nm_so;
784 int error = 0, noreply = 0;
785 struct inpcb *inp;
786 struct tcpcb *tp;
788 COMMON_START(so, inp, 0);
790 if (msg->rcvd.nm_pru_flags & PRUR_ASYNC) {
791 noreply = 1;
792 so_async_rcvd_reply(so);
794 tcp_output(tp);
796 COMMON_END1(PRU_RCVD, noreply);
800 * Do a send by putting data in output queue and updating urgent
801 * marker if URG set. Possibly send more data. Unlike the other
802 * pru_*() routines, the mbuf chains are our responsibility. We
803 * must either enqueue them or free them. The other pru_* routines
804 * generally are caller-frees.
806 static void
807 tcp_usr_send(netmsg_t msg)
809 struct socket *so = msg->send.base.nm_so;
810 int flags = msg->send.nm_flags;
811 struct mbuf *m = msg->send.nm_m;
812 int error = 0;
813 struct inpcb *inp;
814 struct tcpcb *tp;
815 TCPDEBUG0;
817 KKASSERT(msg->send.nm_control == NULL);
818 KKASSERT(msg->send.nm_addr == NULL);
819 KKASSERT((flags & PRUS_FREEADDR) == 0);
821 inp = so->so_pcb;
823 if (inp == NULL) {
825 * OOPS! we lost a race, the TCP session got reset after
826 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
827 * network interrupt in the non-critical section of sosend().
829 m_freem(m);
830 error = ECONNRESET; /* XXX EPIPE? */
831 tp = NULL;
832 TCPDEBUG1();
833 goto out;
835 tp = intotcpcb(inp);
836 TCPDEBUG1();
838 #ifdef foo
840 * This is no longer necessary, since:
841 * - sosendtcp() has already checked it for us
842 * - It does not work with asynchronized send
846 * Don't let too much OOB data build up
848 if (flags & PRUS_OOB) {
849 if (ssb_space(&so->so_snd) < -512) {
850 m_freem(m);
851 error = ENOBUFS;
852 goto out;
855 #endif
858 * Pump the data into the socket.
860 if (m) {
861 ssb_appendstream(&so->so_snd, m);
862 sowwakeup(so);
864 if (flags & PRUS_OOB) {
866 * According to RFC961 (Assigned Protocols),
867 * the urgent pointer points to the last octet
868 * of urgent data. We continue, however,
869 * to consider it to indicate the first octet
870 * of data past the urgent section.
871 * Otherwise, snd_up should be one lower.
873 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
874 tp->t_flags |= TF_FORCE;
875 error = tcp_output(tp);
876 tp->t_flags &= ~TF_FORCE;
877 } else {
878 if (flags & PRUS_EOF) {
880 * Close the send side of the connection after
881 * the data is sent.
883 socantsendmore(so);
884 tp = tcp_usrclosed(tp);
886 if (tp != NULL && !tcp_output_pending(tp)) {
887 if (flags & PRUS_MORETOCOME)
888 tp->t_flags |= TF_MORETOCOME;
889 error = tcp_output_fair(tp);
890 if (flags & PRUS_MORETOCOME)
891 tp->t_flags &= ~TF_MORETOCOME;
894 COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
895 ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
896 (flags & PRUS_NOREPLY));
900 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
901 * will sofree() it when we return.
903 static void
904 tcp_usr_abort(netmsg_t msg)
906 struct socket *so = msg->abort.base.nm_so;
907 int error = 0;
908 struct inpcb *inp;
909 struct tcpcb *tp;
911 COMMON_START(so, inp, 1);
912 tp = tcp_drop(tp, ECONNABORTED);
913 COMMON_END(PRU_ABORT);
917 * Receive out-of-band data.
919 static void
920 tcp_usr_rcvoob(netmsg_t msg)
922 struct socket *so = msg->rcvoob.base.nm_so;
923 struct mbuf *m = msg->rcvoob.nm_m;
924 int flags = msg->rcvoob.nm_flags;
925 int error = 0;
926 struct inpcb *inp;
927 struct tcpcb *tp;
929 COMMON_START(so, inp, 0);
930 if ((so->so_oobmark == 0 &&
931 (so->so_state & SS_RCVATMARK) == 0) ||
932 so->so_options & SO_OOBINLINE ||
933 tp->t_oobflags & TCPOOB_HADDATA) {
934 error = EINVAL;
935 goto out;
937 if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
938 error = EWOULDBLOCK;
939 goto out;
941 m->m_len = 1;
942 *mtod(m, caddr_t) = tp->t_iobc;
943 if ((flags & MSG_PEEK) == 0)
944 tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
945 COMMON_END(PRU_RCVOOB);
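/*
 * Illustrative userland sketch (compiled out below): fetching the
 * single urgent byte (tp->t_iobc) from an application.  The error
 * cases mirror the checks above: EWOULDBLOCK while the byte has not
 * arrived, EINVAL once it was consumed or if SO_OOBINLINE is set.
 * Only the standard sockets API is assumed.
 */
#if 0
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static int
read_oob_byte(int s, char *cp)
{
	ssize_t n;

	n = recv(s, cp, 1, MSG_OOB);
	if (n == 1)
		return (0);		/* got the urgent byte */
	if (n < 0 && errno == EWOULDBLOCK)
		return (1);		/* not here yet, retry later */
	return (-1);			/* EINVAL or other failure */
}
#endif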
948 static void
949 tcp_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
951 in_savefaddr(so, faddr);
954 #ifdef INET6
955 static void
956 tcp6_usr_savefaddr(struct socket *so, const struct sockaddr *faddr)
958 in6_savefaddr(so, faddr);
960 #endif
962 static int
963 tcp_usr_preconnect(struct socket *so, const struct sockaddr *nam,
964 struct thread *td __unused)
966 const struct sockaddr_in *sinp;
968 sinp = (const struct sockaddr_in *)nam;
969 if (sinp->sin_family == AF_INET &&
970 IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
971 return EAFNOSUPPORT;
973 soisconnecting(so);
974 return 0;
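/*
 * Illustrative userland sketch (compiled out below): a non-blocking
 * connect as seen from an application.  By the time connect(2) returns
 * EINPROGRESS, this preconnect hook appears to have already rejected
 * multicast destinations and marked the socket connecting; the SYN
 * exchange itself then proceeds asynchronously in the protocol thread
 * (see the PRUC_ASYNC handling in tcp_usr_connect()).  Only the
 * standard sockets API is assumed.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

static int
connect_nonblock(int s, const struct sockaddr *sa, socklen_t salen,
    int timeout_ms)
{
	struct pollfd pfd;
	int err;
	socklen_t elen = sizeof(err);

	if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0)
		return (-1);
	if (connect(s, sa, salen) == 0)
		return (0);
	if (errno != EINPROGRESS)
		return (-1);

	pfd.fd = s;
	pfd.events = POLLOUT;
	pfd.revents = 0;
	if (poll(&pfd, 1, timeout_ms) != 1)
		return (-1);
	/* The asynchronous connect's result is reported via SO_ERROR. */
	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &elen) < 0 || err != 0)
		return (-1);
	return (0);
}
#endif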
977 /* xxx - should be const */
978 struct pr_usrreqs tcp_usrreqs = {
979 .pru_abort = tcp_usr_abort,
980 .pru_accept = tcp_usr_accept,
981 .pru_attach = tcp_usr_attach,
982 .pru_bind = tcp_usr_bind,
983 .pru_connect = tcp_usr_connect,
984 .pru_connect2 = pr_generic_notsupp,
985 .pru_control = in_control_dispatch,
986 .pru_detach = tcp_usr_detach,
987 .pru_disconnect = tcp_usr_disconnect,
988 .pru_listen = tcp_usr_listen,
989 .pru_peeraddr = in_setpeeraddr_dispatch,
990 .pru_rcvd = tcp_usr_rcvd,
991 .pru_rcvoob = tcp_usr_rcvoob,
992 .pru_send = tcp_usr_send,
993 .pru_sense = pru_sense_null,
994 .pru_shutdown = tcp_usr_shutdown,
995 .pru_sockaddr = in_setsockaddr_dispatch,
996 .pru_sosend = sosendtcp,
997 .pru_soreceive = sorecvtcp,
998 .pru_savefaddr = tcp_usr_savefaddr,
999 .pru_preconnect = tcp_usr_preconnect,
1000 .pru_preattach = tcp_usr_preattach
1003 #ifdef INET6
1004 struct pr_usrreqs tcp6_usrreqs = {
1005 .pru_abort = tcp_usr_abort,
1006 .pru_accept = tcp6_usr_accept,
1007 .pru_attach = tcp_usr_attach,
1008 .pru_bind = tcp6_usr_bind,
1009 .pru_connect = tcp6_usr_connect,
1010 .pru_connect2 = pr_generic_notsupp,
1011 .pru_control = in6_control_dispatch,
1012 .pru_detach = tcp_usr_detach,
1013 .pru_disconnect = tcp_usr_disconnect,
1014 .pru_listen = tcp6_usr_listen,
1015 .pru_peeraddr = in6_setpeeraddr_dispatch,
1016 .pru_rcvd = tcp_usr_rcvd,
1017 .pru_rcvoob = tcp_usr_rcvoob,
1018 .pru_send = tcp_usr_send,
1019 .pru_sense = pru_sense_null,
1020 .pru_shutdown = tcp_usr_shutdown,
1021 .pru_sockaddr = in6_setsockaddr_dispatch,
1022 .pru_sosend = sosendtcp,
1023 .pru_soreceive = sorecvtcp,
1024 .pru_savefaddr = tcp6_usr_savefaddr
1026 #endif /* INET6 */
1028 static int
1029 tcp_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf *m,
1030 struct sockaddr_in *sin, struct sockaddr_in *if_sin)
1032 struct inpcb *inp = tp->t_inpcb, *oinp;
1033 struct socket *so = inp->inp_socket;
1034 struct route *ro = &inp->inp_route;
1036 KASSERT(inp->inp_pcbinfo == &tcbinfo[mycpu->gd_cpuid],
1037 ("pcbinfo mismatch"));
1039 oinp = in_pcblookup_hash(inp->inp_pcbinfo,
1040 sin->sin_addr, sin->sin_port,
1041 (inp->inp_laddr.s_addr != INADDR_ANY ?
1042 inp->inp_laddr : if_sin->sin_addr),
1043 inp->inp_lport, 0, NULL);
1044 if (oinp != NULL) {
1045 m_freem(m);
1046 return (EADDRINUSE);
1048 if (inp->inp_laddr.s_addr == INADDR_ANY)
1049 inp->inp_laddr = if_sin->sin_addr;
1050 inp->inp_faddr = sin->sin_addr;
1051 inp->inp_fport = sin->sin_port;
1052 in_pcbinsconnhash(inp);
1055 * We are now on the inpcb's owner CPU.  If the cached route was
1056 * freed because the rtentry's owner CPU is not the current CPU
1057 * (e.g. in tcp_connect()), then we try to reallocate it here in
1058 * the hope that a rtentry can be cloned from a RTF_PRCLONING
1059 * rtentry.
1061 if (!(inp->inp_socket->so_options & SO_DONTROUTE) && /*XXX*/
1062 ro->ro_rt == NULL) {
1063 bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
1064 ro->ro_dst.sa_family = AF_INET;
1065 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
1066 ((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
1067 sin->sin_addr;
1068 rtalloc(ro);
1072 * Now that no more errors can occur, change the protocol processing
1073 * port to the current thread (which is the correct thread).
1075 * Create TCP timer message now; we are on the tcpcb's owner
1076 * CPU/thread.
1078 tcp_create_timermsg(tp, &curthread->td_msgport);
1081 * Compute window scaling to request.  Use a larger scaling than
1082 * needed for the initial receive buffer in case the receive buffer
1083 * gets expanded.
1085 if (tp->request_r_scale < TCP_MIN_WINSHIFT)
1086 tp->request_r_scale = TCP_MIN_WINSHIFT;
1087 while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1088 (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat
1090 tp->request_r_scale++;
1093 soisconnecting(so);
1094 tcpstat.tcps_connattempt++;
1095 TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
1096 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
1097 tp->iss = tcp_new_isn(tp);
1098 tcp_sendseqinit(tp);
1099 if (m) {
1100 ssb_appendstream(&so->so_snd, m);
1101 m = NULL;
1102 if (flags & PRUS_OOB)
1103 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
1107 * Close the send side of the connection after
1108 * the data is sent if flagged.
1110 if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
1111 socantsendmore(so);
1112 tp = tcp_usrclosed(tp);
1114 return (tcp_output(tp));
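/*
 * Illustrative standalone sketch (compiled out below): the window-scale
 * request loop above, with a couple of worked values.  EX_TCP_MAXWIN
 * (65535) and EX_TCP_MAX_WINSHIFT (14) are the classic BSD values and
 * are assumed here; the kernel additionally raises the result to at
 * least TCP_MIN_WINSHIFT so an autosized receive buffer still fits
 * later.
 */
#if 0
#include <stdio.h>

#define EX_TCP_MAXWIN		65535	/* assumed */
#define EX_TCP_MAX_WINSHIFT	14	/* assumed */

static int
request_r_scale(unsigned long rcv_hiwat)
{
	int scale = 0;

	/* Smallest shift whose scaled maximum window covers the buffer. */
	while (scale < EX_TCP_MAX_WINSHIFT &&
	    ((unsigned long)EX_TCP_MAXWIN << scale) < rcv_hiwat)
		scale++;
	return (scale);
}

int
main(void)
{
	printf("%d\n", request_r_scale(57344));		/* 0: default recvspace */
	printf("%d\n", request_r_scale(1024 * 1024));	/* 5: a 1MB buffer */
	return (0);
}
#endif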
1118 * Common subroutine to open a TCP connection to remote host specified
1119 * by the struct sockaddr_in pointed to by nam. Call in_pcbbind to assign a local
1120 * port number if needed. Call in_pcbladdr to do the routing and to choose
1121 * a local host address (interface).
1122 * Initialize connection parameters and enter SYN-SENT state.
1124 static void
1125 tcp_connect(netmsg_t msg)
1127 struct socket *so = msg->connect.base.nm_so;
1128 struct sockaddr *nam = msg->connect.nm_nam;
1129 struct thread *td = msg->connect.nm_td;
1130 struct sockaddr_in *sin = (struct sockaddr_in *)nam;
1131 struct sockaddr_in *if_sin = NULL;
1132 struct inpcb *inp;
1133 struct tcpcb *tp;
1134 int error;
1135 lwkt_port_t port;
1137 COMMON_START(so, inp, 0);
1140 * Reconnect our pcb if we have to
1142 if (msg->connect.nm_flags & PRUC_RECONNECT) {
1143 msg->connect.nm_flags &= ~PRUC_RECONNECT;
1144 TCP_STATE_MIGRATE_END(tp);
1145 in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1149 * Bind if we have to
1151 if (inp->inp_lport == 0) {
1152 if (tcp_lport_extension) {
1153 KKASSERT(inp->inp_laddr.s_addr == INADDR_ANY);
1155 error = in_pcbladdr(inp, nam, &if_sin, td);
1156 if (error)
1157 goto out;
1158 inp->inp_laddr.s_addr = if_sin->sin_addr.s_addr;
1160 error = in_pcbbind_remote(inp, nam, td);
1161 if (error)
1162 goto out;
1164 msg->connect.nm_flags |= PRUC_HASLADDR;
1165 } else {
1166 error = in_pcbbind(inp, NULL, td);
1167 if (error)
1168 goto out;
1172 if ((msg->connect.nm_flags & PRUC_HASLADDR) == 0) {
1174 * Calculate the correct protocol processing thread. The
1175 * connect operation must run there. Set the forwarding
1176 * port before we forward the message or it will get bounced
1177 * right back to us.
1179 error = in_pcbladdr(inp, nam, &if_sin, td);
1180 if (error)
1181 goto out;
1183 KKASSERT(inp->inp_socket == so);
1185 port = tcp_addrport(sin->sin_addr.s_addr, sin->sin_port,
1186 (inp->inp_laddr.s_addr != INADDR_ANY ?
1187 inp->inp_laddr.s_addr : if_sin->sin_addr.s_addr),
1188 inp->inp_lport);
1190 if (port != &curthread->td_msgport) {
1191 lwkt_msg_t lmsg = &msg->connect.base.lmsg;
1194 * in_pcbladdr() may have allocated a route entry for us
1195 * on the current CPU, but we need a route entry on the
1196 * inpcb's owner CPU, so free it here.
1198 in_pcbresetroute(inp);
1201 * We are moving the protocol processing port the socket
1202 * is on, we have to unlink here and re-link on the
1203 * target cpu.
1205 in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1206 msg->connect.nm_flags |= PRUC_RECONNECT;
1207 msg->connect.base.nm_dispatch = tcp_connect;
1209 TCP_STATE_MIGRATE_START(tp);
1212 * Use message put done receipt to change this socket's
1213 * so_port, i.e. _after_ this message was put onto the
1214 * target netisr's msgport but _before_ the message could
1215 * be pulled from the target netisr's msgport, so that:
1216 * - The upper half (socket code) will not see the new
1217 * msgport before this message reaches the new msgport
1218 * and messages for this socket will be ordered.
1219 * - This message will see the new msgport, when its
1220 * handler is called in the target netisr.
1222 * NOTE:
1223 * We MUST use a message put done receipt to change this
1224 * socket's so_port:
1225 * If we changed the so_port in this netisr after the
1226 * lwkt_forwardmsg (so messages for this socket will be
1227 * ordered) and changed the so_port in the target netisr
1228 * at the very beginning of this message's handler, we
1229 * would suffer a so_port overwrite race, given that this
1230 * message might be forwarded again.
1232 * NOTE:
1233 * This mechanism depends on the netisr's msgport being a
1234 * spin msgport (currently it is).
1236 * If the upper half saw the new msgport before this
1237 * message reached the target netisr's msgport, the
1238 * messages sent from the upper half could reach the new
1239 * msgport before this message, thus there would be
1240 * message reordering. The worst case could be soclose()
1241 * saw the new msgport and the detach message could reach
1242 * the new msgport before this message, i.e. the inpcb
1243 * could have been destroyed when this message was still
1244 * pending on or on its way to the new msgport. Other
1245 * weird cases could also happen, e.g. inpcb->inp_pcbinfo,
1246 * since we have unlinked this inpcb from the current
1247 * pcbinfo first.
1249 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
1250 lwkt_forwardmsg(port, lmsg);
1251 /* msg invalid now */
1252 return;
1253 } else if (msg->connect.nm_flags & PRUC_HELDTD) {
1255 * The original thread is no longer needed; release it.
1257 lwkt_rele(td);
1258 msg->connect.nm_flags &= ~PRUC_HELDTD;
1260 error = tcp_connect_oncpu(tp, msg->connect.nm_sndflags,
1261 msg->connect.nm_m, sin, if_sin);
1262 msg->connect.nm_m = NULL;
1263 out:
1264 if (msg->connect.nm_m) {
1265 m_freem(msg->connect.nm_m);
1266 msg->connect.nm_m = NULL;
1268 if (msg->connect.nm_flags & PRUC_HELDTD)
1269 lwkt_rele(td);
1270 if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
1271 so->so_error = error;
1272 soisdisconnected(so);
1274 lwkt_replymsg(&msg->connect.base.lmsg, error);
1275 /* msg invalid now */
1278 #ifdef INET6
1280 static void
1281 tcp6_connect(netmsg_t msg)
1283 struct tcpcb *tp;
1284 struct socket *so = msg->connect.base.nm_so;
1285 struct sockaddr *nam = msg->connect.nm_nam;
1286 struct thread *td = msg->connect.nm_td;
1287 struct inpcb *inp;
1288 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
1289 struct in6_addr *addr6;
1290 lwkt_port_t port;
1291 int error;
1293 COMMON_START(so, inp, 0);
1296 * Reconnect our pcb if we have to
1298 if (msg->connect.nm_flags & PRUC_RECONNECT) {
1299 msg->connect.nm_flags &= ~PRUC_RECONNECT;
1300 TCP_STATE_MIGRATE_END(tp);
1301 in_pcblink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1305 * Bind if we have to
1307 if (inp->inp_lport == 0) {
1308 error = in6_pcbbind(inp, NULL, td);
1309 if (error)
1310 goto out;
1314 * Cannot simply call in_pcbconnect, because there might be an
1315 * earlier incarnation of this same connection still in
1316 * TIME_WAIT state, creating an ADDRINUSE error.
1318 error = in6_pcbladdr(inp, nam, &addr6, td);
1319 if (error)
1320 goto out;
1322 port = tcp6_addrport(); /* XXX hack for now, always cpu0 */
1324 if (port != &curthread->td_msgport) {
1325 lwkt_msg_t lmsg = &msg->connect.base.lmsg;
1328 * in_pcbladdr() may have allocated a route entry for us
1329 * on the current CPU, but we need a route entry on the
1330 * inpcb's owner CPU, so free it here.
1332 in_pcbresetroute(inp);
1334 in_pcbunlink(so->so_pcb, &tcbinfo[mycpu->gd_cpuid]);
1335 msg->connect.nm_flags |= PRUC_RECONNECT;
1336 msg->connect.base.nm_dispatch = tcp6_connect;
1338 TCP_STATE_MIGRATE_START(tp);
1340 /* See the related comment in tcp_connect() */
1341 lwkt_setmsg_receipt(lmsg, tcp_sosetport);
1342 lwkt_forwardmsg(port, lmsg);
1343 /* msg invalid now */
1344 return;
1346 error = tcp6_connect_oncpu(tp, msg->connect.nm_sndflags,
1347 &msg->connect.nm_m, sin6, addr6);
1348 /* nm_m may still be intact */
1349 out:
1350 if (msg->connect.nm_m) {
1351 m_freem(msg->connect.nm_m);
1352 msg->connect.nm_m = NULL;
1354 lwkt_replymsg(&msg->connect.base.lmsg, error);
1355 /* msg invalid now */
1358 static int
1359 tcp6_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf **mp,
1360 struct sockaddr_in6 *sin6, struct in6_addr *addr6)
1362 struct mbuf *m = *mp;
1363 struct inpcb *inp = tp->t_inpcb;
1364 struct socket *so = inp->inp_socket;
1365 struct inpcb *oinp;
1368 * Cannot simply call in_pcbconnect, because there might be an
1369 * earlier incarnation of this same connection still in
1370 * TIME_WAIT state, creating an ADDRINUSE error.
1372 oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
1373 &sin6->sin6_addr, sin6->sin6_port,
1374 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
1375 addr6 : &inp->in6p_laddr),
1376 inp->inp_lport, 0, NULL);
1377 if (oinp)
1378 return (EADDRINUSE);
1380 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
1381 inp->in6p_laddr = *addr6;
1382 inp->in6p_faddr = sin6->sin6_addr;
1383 inp->inp_fport = sin6->sin6_port;
1384 if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
1385 inp->in6p_flowinfo = sin6->sin6_flowinfo;
1386 in_pcbinsconnhash(inp);
1389 * Now that no more errors can occur, change the protocol processing
1390 * port to the current thread (which is the correct thread).
1392 * Create TCP timer message now; we are on the tcpcb's owner
1393 * CPU/thread.
1395 tcp_create_timermsg(tp, &curthread->td_msgport);
1397 /* Compute window scaling to request. */
1398 if (tp->request_r_scale < TCP_MIN_WINSHIFT)
1399 tp->request_r_scale = TCP_MIN_WINSHIFT;
1400 while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1401 (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
1402 tp->request_r_scale++;
1405 soisconnecting(so);
1406 tcpstat.tcps_connattempt++;
1407 TCP_STATE_CHANGE(tp, TCPS_SYN_SENT);
1408 tcp_callout_reset(tp, tp->tt_keep, tp->t_keepinit, tcp_timer_keep);
1409 tp->iss = tcp_new_isn(tp);
1410 tcp_sendseqinit(tp);
1411 if (m) {
1412 ssb_appendstream(&so->so_snd, m);
1413 *mp = NULL;
1414 if (flags & PRUS_OOB)
1415 tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
1419 * Close the send side of the connection after
1420 * the data is sent if flagged.
1422 if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
1423 socantsendmore(so);
1424 tp = tcp_usrclosed(tp);
1426 return (tcp_output(tp));
1429 #endif /* INET6 */
1432 * The new sockopt interface makes it possible for us to block in the
1433 * copyin/out step (if we take a page fault). Taking a page fault while
1434 * in a critical section is probably a Bad Thing. (Since sockets and pcbs
1435 * both now use TSM, there probably isn't any need for this function to
1436 * run in a critical section any more. This needs more examination.)
1438 void
1439 tcp_ctloutput(netmsg_t msg)
1441 struct socket *so = msg->base.nm_so;
1442 struct sockopt *sopt = msg->ctloutput.nm_sopt;
1443 struct thread *td = NULL;
1444 int error, opt, optval, opthz;
1445 struct inpcb *inp;
1446 struct tcpcb *tp;
1448 if (msg->ctloutput.nm_flags & PRCO_HELDTD)
1449 td = sopt->sopt_td;
1451 error = 0;
1452 inp = so->so_pcb;
1453 if (inp == NULL) {
1454 error = ECONNRESET;
1455 goto done;
1457 tp = intotcpcb(inp);
1459 /* Get socket's owner cpuid hint */
1460 if (sopt->sopt_level == SOL_SOCKET &&
1461 sopt->sopt_dir == SOPT_GET &&
1462 sopt->sopt_name == SO_CPUHINT) {
1463 if (tp->t_flags & TF_LISTEN) {
1465 * A listen socket's owner cpuid is always 0,
1466 * which is not a useful hint unless
1467 * SO_REUSEPORT is set.
1469 if (so->so_options & SO_REUSEPORT)
1470 optval = (inp->inp_lgrpindex & ncpus2_mask);
1471 else
1472 optval = -1; /* no hint */
1473 } else {
1474 optval = mycpuid;
1476 soopt_from_kbuf(sopt, &optval, sizeof(optval));
1477 goto done;
1480 if (sopt->sopt_level != IPPROTO_TCP) {
1481 if (sopt->sopt_level == IPPROTO_IP) {
1482 switch (sopt->sopt_name) {
1483 case IP_MULTICAST_IF:
1484 case IP_MULTICAST_VIF:
1485 case IP_MULTICAST_TTL:
1486 case IP_MULTICAST_LOOP:
1487 case IP_ADD_MEMBERSHIP:
1488 case IP_DROP_MEMBERSHIP:
1490 * Multicast does not make sense on
1491 * TCP sockets.
1493 error = EOPNOTSUPP;
1494 goto done;
1497 #ifdef INET6
1498 if (INP_CHECK_SOCKAF(so, AF_INET6))
1499 ip6_ctloutput_dispatch(msg);
1500 else
1501 #endif /* INET6 */
1502 ip_ctloutput(msg);
1503 /* msg invalid now */
1504 if (td != NULL)
1505 lwkt_rele(td);
1506 return;
1509 switch (sopt->sopt_dir) {
1510 case SOPT_SET:
1511 error = soopt_to_kbuf(sopt, &optval, sizeof optval,
1512 sizeof optval);
1513 if (error)
1514 break;
1515 switch (sopt->sopt_name) {
1516 case TCP_FASTKEEP:
1517 if (optval > 0)
1518 tp->t_keepidle = tp->t_keepintvl;
1519 else
1520 tp->t_keepidle = tcp_keepidle;
1521 tcp_timer_keep_activity(tp, 0);
1522 break;
1523 #ifdef TCP_SIGNATURE
1524 case TCP_SIGNATURE_ENABLE:
1525 if (tp->t_state == TCPS_CLOSED) {
1527 * This is the only state in which this
1528 * option can safely be changed; in other
1529 * states some segments may already have
1530 * been sent.
1532 if (optval > 0)
1533 tp->t_flags |= TF_SIGNATURE;
1534 else
1535 tp->t_flags &= ~TF_SIGNATURE;
1536 } else {
1537 error = EOPNOTSUPP;
1539 break;
1540 #endif /* TCP_SIGNATURE */
1541 case TCP_NODELAY:
1542 case TCP_NOOPT:
1543 switch (sopt->sopt_name) {
1544 case TCP_NODELAY:
1545 opt = TF_NODELAY;
1546 break;
1547 case TCP_NOOPT:
1548 opt = TF_NOOPT;
1549 break;
1550 default:
1551 opt = 0; /* dead code to fool gcc */
1552 break;
1555 if (optval)
1556 tp->t_flags |= opt;
1557 else
1558 tp->t_flags &= ~opt;
1559 break;
1561 case TCP_NOPUSH:
1562 if (tcp_disable_nopush)
1563 break;
1564 if (optval)
1565 tp->t_flags |= TF_NOPUSH;
1566 else {
1567 tp->t_flags &= ~TF_NOPUSH;
1568 error = tcp_output(tp);
1570 break;
1572 case TCP_MAXSEG:
1574 * Must be between 0 and maxseg. If the requested
1575 * maxseg is too small to satisfy the desired minmss,
1576 * pump it up (silently so sysctl modifications of
1577 * minmss do not create unexpected program failures).
1578 * Handle degenerate cases.
1580 if (optval > 0 && optval <= tp->t_maxseg) {
1581 if (optval + 40 < tcp_minmss) {
1582 optval = tcp_minmss - 40;
1583 if (optval < 0)
1584 optval = 1;
1586 tp->t_maxseg = optval;
1587 } else {
1588 error = EINVAL;
1590 break;
1592 case TCP_KEEPINIT:
1593 opthz = ((int64_t)optval * hz) / 1000;
1594 if (opthz >= 1)
1595 tp->t_keepinit = opthz;
1596 else
1597 error = EINVAL;
1598 break;
1600 case TCP_KEEPIDLE:
1601 opthz = ((int64_t)optval * hz) / 1000;
1602 if (opthz >= 1) {
1603 tp->t_keepidle = opthz;
1604 tcp_timer_keep_activity(tp, 0);
1605 } else {
1606 error = EINVAL;
1608 break;
1610 case TCP_KEEPINTVL:
1611 opthz = ((int64_t)optval * hz) / 1000;
1612 if (opthz >= 1) {
1613 tp->t_keepintvl = opthz;
1614 tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
1615 } else {
1616 error = EINVAL;
1618 break;
1620 case TCP_KEEPCNT:
1621 if (optval > 0) {
1622 tp->t_keepcnt = optval;
1623 tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;
1624 } else {
1625 error = EINVAL;
1627 break;
1629 default:
1630 error = ENOPROTOOPT;
1631 break;
1633 break;
1635 case SOPT_GET:
1636 switch (sopt->sopt_name) {
1637 #ifdef TCP_SIGNATURE
1638 case TCP_SIGNATURE_ENABLE:
1639 optval = (tp->t_flags & TF_SIGNATURE) ? 1 : 0;
1640 break;
1641 #endif /* TCP_SIGNATURE */
1642 case TCP_NODELAY:
1643 optval = tp->t_flags & TF_NODELAY;
1644 break;
1645 case TCP_MAXSEG:
1646 optval = tp->t_maxseg;
1647 break;
1648 case TCP_NOOPT:
1649 optval = tp->t_flags & TF_NOOPT;
1650 break;
1651 case TCP_NOPUSH:
1652 optval = tp->t_flags & TF_NOPUSH;
1653 break;
1654 case TCP_KEEPINIT:
1655 optval = ((int64_t)tp->t_keepinit * 1000) / hz;
1656 break;
1657 case TCP_KEEPIDLE:
1658 optval = ((int64_t)tp->t_keepidle * 1000) / hz;
1659 break;
1660 case TCP_KEEPINTVL:
1661 optval = ((int64_t)tp->t_keepintvl * 1000) / hz;
1662 break;
1663 case TCP_KEEPCNT:
1664 optval = tp->t_keepcnt;
1665 break;
1666 default:
1667 error = ENOPROTOOPT;
1668 break;
1670 if (error == 0)
1671 soopt_from_kbuf(sopt, &optval, sizeof optval);
1672 break;
1674 done:
1675 if (td != NULL)
1676 lwkt_rele(td);
1677 lwkt_replymsg(&msg->lmsg, error);
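/*
 * Illustrative userland sketch (compiled out below): exercising the
 * option cases above from an application.  Per the optval * hz / 1000
 * conversions, the keep-alive knobs take milliseconds;
 * TCP_NODELAY/TCP_NOOPT/TCP_NOPUSH/TCP_FASTKEEP are the options this
 * commit lets tcp_ctloutmsg() set asynchronously; SO_CPUHINT is the
 * DragonFly-specific socket-level query handled at the top of
 * tcp_ctloutput().  The values below are arbitrary.
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int
tune_tcp_socket(int s)
{
	int on = 1;
	int keepidle = 30 * 1000;	/* 30s idle before first probe */
	int keepintvl = 5 * 1000;	/* 5s between probes */
	int keepcnt = 4;		/* drop after 4 unanswered probes */
	int cpuhint;
	socklen_t len = sizeof(cpuhint);

	if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) < 0 ||
	    setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0 ||
	    setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle,
		sizeof(keepidle)) < 0 ||
	    setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &keepintvl,
		sizeof(keepintvl)) < 0 ||
	    setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt,
		sizeof(keepcnt)) < 0)
		return (-1);

	/* Which cpu owns this socket; -1 means no hint. */
	if (getsockopt(s, SOL_SOCKET, SO_CPUHINT, &cpuhint, &len) < 0)
		return (-1);
	return (cpuhint);
}
#endif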
1680 struct netmsg_tcp_ctloutput {
1681 struct netmsg_pr_ctloutput ctloutput;
1682 struct sockopt sopt;
1683 int sopt_val;
1687 * Allocate netmsg_pr_ctloutput for asynchronous tcp_ctloutput.
1689 struct netmsg_pr_ctloutput *
1690 tcp_ctloutmsg(struct sockopt *sopt)
1692 struct netmsg_tcp_ctloutput *msg;
1693 int flags = 0, error;
1695 KASSERT(sopt->sopt_dir == SOPT_SET, ("not from ctloutput"));
1697 /* Only a small set of options may be set asynchronously. */
1698 if (sopt->sopt_level != IPPROTO_TCP)
1699 return NULL;
1700 switch (sopt->sopt_name) {
1701 case TCP_NODELAY:
1702 case TCP_NOOPT:
1703 case TCP_NOPUSH:
1704 case TCP_FASTKEEP:
1705 break;
1706 default:
1707 return NULL;
1710 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK);
1711 if (msg == NULL) {
1712 /* Fallback to synchronous tcp_ctloutput */
1713 return NULL;
1716 /* Save the sockopt */
1717 msg->sopt = *sopt;
1719 /* Fixup the sopt.sopt_val ptr */
1720 error = sooptcopyin(sopt, &msg->sopt_val,
1721 sizeof(msg->sopt_val), sizeof(msg->sopt_val));
1722 if (error) {
1723 kfree(msg, M_LWKTMSG);
1724 return NULL;
1726 msg->sopt.sopt_val = &msg->sopt_val;
1728 /* Hold the current thread */
1729 if (msg->sopt.sopt_td != NULL) {
1730 flags |= PRCO_HELDTD;
1731 lwkt_hold(msg->sopt.sopt_td);
1734 msg->ctloutput.nm_flags = flags;
1735 msg->ctloutput.nm_sopt = &msg->sopt;
1737 return &msg->ctloutput;
1741 * tcp_sendspace and tcp_recvspace are the default send and receive window
1742 * sizes, respectively. These are obsolescent (this information should
1743 * be set by the route).
1745 * Use a default that does not require tcp window scaling to be turned
1746 * on. Individual programs or the administrator can increase the default.
1748 u_long tcp_sendspace = 57344; /* largest multiple of PAGE_SIZE < 64k */
1749 SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
1750 &tcp_sendspace , 0, "Maximum outgoing TCP datagram size");
1751 u_long tcp_recvspace = 57344; /* largest multiple of PAGE_SIZE < 64k */
1752 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
1753 &tcp_recvspace , 0, "Maximum incoming TCP datagram size");
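/*
 * Illustrative userland sketch (compiled out below): reading the two
 * defaults above from an application.  They are exported as integers
 * (SYSCTL_INT) under net.inet.tcp.{sendspace,recvspace}, even though
 * the kernel variables are u_long.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int sendspace, recvspace;
	size_t len;

	len = sizeof(sendspace);
	if (sysctlbyname("net.inet.tcp.sendspace", &sendspace, &len,
	    NULL, 0) < 0)
		return (1);
	len = sizeof(recvspace);
	if (sysctlbyname("net.inet.tcp.recvspace", &recvspace, &len,
	    NULL, 0) < 0)
		return (1);
	printf("sendspace=%u recvspace=%u\n", sendspace, recvspace);
	return (0);
}
#endif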
1756 * Attach TCP protocol to socket, allocating internet protocol control
1757 * block, tcp control block, buffer space, and entering CLOSED state.
1759 static int
1760 tcp_attach(struct socket *so, struct pru_attach_info *ai)
1762 struct inpcb *inp;
1763 int error;
1764 int cpu;
1765 #ifdef INET6
1766 boolean_t isipv6 = INP_CHECK_SOCKAF(so, AF_INET6);
1767 #endif
1769 if (ai != NULL) {
1770 error = tcp_usr_preattach(so, 0 /* don't care */, ai);
1771 if (error)
1772 return (error);
1773 } else {
1774 /* Post attach; do nothing */
1777 cpu = mycpu->gd_cpuid;
1780 * Set the default pcbinfo. This will likely change when we
1781 * bind/connect.
1783 error = in_pcballoc(so, &tcbinfo[cpu]);
1784 if (error)
1785 return (error);
1786 inp = so->so_pcb;
1787 #ifdef INET6
1788 if (isipv6)
1789 inp->in6p_hops = -1; /* use kernel default */
1790 #endif
1791 tcp_newtcpcb(inp);
1792 /* Keep a reference for asynchronized pru_rcvd */
1793 soreference(so);
1794 return (0);
1798 * Initiate (or continue) disconnect.
1799 * If embryonic state, just send reset (once).
1800 * If in ``let data drain'' option and linger null, just drop.
1801 * Otherwise (hard), mark socket disconnecting and drop
1802 * current input data; switch states based on user close, and
1803 * send segment to peer (with FIN).
1805 static struct tcpcb *
1806 tcp_disconnect(struct tcpcb *tp)
1808 struct socket *so = tp->t_inpcb->inp_socket;
1810 if (tp->t_state < TCPS_ESTABLISHED) {
1811 tp = tcp_close(tp);
1812 } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
1813 tp = tcp_drop(tp, 0);
1814 } else {
1815 lwkt_gettoken(&so->so_rcv.ssb_token);
1816 soisdisconnecting(so);
1817 sbflush(&so->so_rcv.sb);
1818 tp = tcp_usrclosed(tp);
1819 if (tp)
1820 tcp_output(tp);
1821 lwkt_reltoken(&so->so_rcv.ssb_token);
1823 return (tp);
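/*
 * Illustrative userland sketch (compiled out below): the SO_LINGER
 * branch above, triggered from an application.  Setting l_onoff=1 with
 * l_linger=0 makes close(2) take the tcp_drop() path, resetting the
 * connection instead of walking the graceful FIN states.  Only the
 * standard sockets API is assumed.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static int
abortive_close(int s)
{
	struct linger l;

	l.l_onoff = 1;		/* linger enabled ... */
	l.l_linger = 0;		/* ... with a zero timeout: drop/reset */
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
		return (-1);
	return (close(s));
}
#endif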
1827 * User issued close, and wish to trail through shutdown states:
1828 * if never received SYN, just forget it. If got a SYN from peer,
1829 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
1830 * If already got a FIN from peer, then almost done; go to LAST_ACK
1831 * state. In all other cases, have already sent FIN to peer (e.g.
1832 * after PRU_SHUTDOWN), and just have to play tedious game waiting
1833 * for peer to send FIN or not respond to keep-alives, etc.
1834 * We can let the user exit from the close as soon as the FIN is acked.
1836 static struct tcpcb *
1837 tcp_usrclosed(struct tcpcb *tp)
1840 switch (tp->t_state) {
1842 case TCPS_CLOSED:
1843 case TCPS_LISTEN:
1844 TCP_STATE_CHANGE(tp, TCPS_CLOSED);
1845 tp = tcp_close(tp);
1846 break;
1848 case TCPS_SYN_SENT:
1849 case TCPS_SYN_RECEIVED:
1850 tp->t_flags |= TF_NEEDFIN;
1851 break;
1853 case TCPS_ESTABLISHED:
1854 TCP_STATE_CHANGE(tp, TCPS_FIN_WAIT_1);
1855 break;
1857 case TCPS_CLOSE_WAIT:
1858 TCP_STATE_CHANGE(tp, TCPS_LAST_ACK);
1859 break;
1861 if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
1862 soisdisconnected(tp->t_inpcb->inp_socket);
1863 /* Prevent the connection from hanging in FIN_WAIT_2 forever. */
1864 if (tp->t_state == TCPS_FIN_WAIT_2) {
1865 tcp_callout_reset(tp, tp->tt_2msl, tp->t_maxidle,
1866 tcp_timer_2msl);
1869 return (tp);