/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
/* Copyright (c) 1990 Mentat Inc. */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/sunldi.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/random.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>
#include <inet/common.h>
#include <inet/ip_impl.h>
#include <inet/ip_ndp.h>
#include <inet/proto_set.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/udp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipdrop.h>
#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_rts.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>
#include <sys/callo.h>
/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in tcp_t and conn_t structures
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t. The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time the
 * eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPSEC checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind a squeue (via squeue_enter with flags as SQ_FILL,
 * SQ_PROCESS, or SQ_NODRAIN). Since the squeues allow a direct function
 * call, the caller can pass any tcp function having a prototype of
 * edesc_t as argument (different from the traditional STREAMS model
 * where packets come in only through designated entry points). The
 * functions that can be directly called via squeue are listed before
 * the usual function prototypes.
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which
 * could have dropped the last TCP reference. So as long as the connection
 * is in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash. Anytime a thread needs to
 * enter the tcp connection perimeter, it retrieves the conn/tcp from
 * q->ptr on the write side or by doing a classify on the read side and
 * then puts a reference on the conn before doing
 * squeue_enter/tryenter/fill. For the read side, the classifier itself
 * puts the reference under the fanout lock to make sure that the tcp
 * can't disappear before it gets processed. The squeue will drop this
 * reference automatically so the called function doesn't have to do a
 * DEC_REF.
 */
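/*
 * Illustrative sketch of the pattern described above (our illustration,
 * not code preserved from this file). A thread outside the perimeter
 * takes a reference and hands an edesc_t-style callback to the squeue.
 * The callback name tcp_example_input is hypothetical; CONN_INC_REF,
 * SQUEUE_ENTER_ONE and tcp_squeue_flag are the real interfaces used
 * throughout this file:
 *
 *	void
 *	tcp_example_input(void *arg, mblk_t *mp, void *arg2,
 *	    ip_recv_attr_t *ira)
 *	{
 *		conn_t	*connp = (conn_t *)arg;
 *
 *		// Runs serialized on connp->conn_sqp; tcp state may be
 *		// touched here without taking any further lock.
 *	}
 *
 *	CONN_INC_REF(connp);
 *	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_example_input, connp,
 *	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
 *
 * (The SQTAG above is borrowed from tcp_close_common() below; real
 * callers pass a tag identifying themselves.) The squeue drops the
 * reference taken here once the callback has run, which is why callees
 * never CONN_DEC_REF that reference themselves.
 */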
/*
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on the application CPU, which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out, but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * ACKed.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_input_listener(). But briefly, the squeue is picked by
 * ip_fanout based on the ring or the sender (if loopback).
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps
 * its reference but tcp_close() always drops IP's reference. So if the tcp
 * was not killed, it is sitting in the time_wait list with 2 references -
 * 1 for TCP and 1 because it is in the classifier's connected hash. This is
 * the condition we use to determine that it is OK to clean up the tcp
 * outside of the squeue when the time wait expires (check the ref under the
 * fanout and conn_lock and if it is 2, remove it from the fanout hash and
 * kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in the acceptor hash since a sockfs listener can never
 * become an acceptor, and also to avoid allocating a tcp_t for the
 * acceptor STREAM since the eager has already been allocated and the
 * accept now happens on the acceptor STREAM. There is a big blob of
 * comment on top of tcp_input_listener explaining the new accept. When
 * the socket is POP'd, sockfs sends us an ioctl to mark the fact and we
 * go back to the old behaviour. Once tcp_issocket is unset, it is never
 * set for the life of that connection.
 *
 * IPsec Notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPsec policies for new connections. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if a TH_RST
 * can be sent out.
 */
/*
 * Values for squeue switch:
 * 1: SQ_NODRAIN
 * 2: SQ_PROCESS
 * 3: SQ_FILL
 */
int tcp_squeue_wput = 2;	/* /etc/systems */
int tcp_squeue_flag;

/*
 * To prevent memory hog, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Size of acceptor hash list. It has to be a power of 2 for hashing.
 */
#define	TCP_ACCEPTOR_FANOUT_SIZE	512

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#endif	/* _ILP32 */

/*
 * Minimum number of connections which can be created per listener. Used
 * when the listener connection count is in effect.
 */
static uint32_t tcp_min_conn_listener = 2;

uint32_t tcp_early_abort = 30;
/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;
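/*
 * Illustrative sketch only (an assumption, not code preserved from this
 * file): a timer mblk carries a tcpt_t in its data block, so when the
 * timeout fires the handler can recover both pieces and dispatch:
 *
 *	tcpt_t	*tcpt = (tcpt_t *)mp->b_rptr;
 *	(*tcpt->tcpt_pfv)((void *)tcpt->tcpt_tcp);
 *
 * i.e. tcpt_pfv is invoked with the saved tcp_t as its argument.
 */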
/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
void		tcp_input_data(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy);

/* Prototype for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static int	tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static int	tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo,
		    uint_t srcid, uint32_t scope_id);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_wsrv(queue_t *q);
static void	tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
static void	tcp_update_zcopy(tcp_t *tcp);
static void	tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
    ixa_notify_arg_t);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);
static int	tcp_squeue_switch(int);
static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
static void	tcp_squeue_add(squeue_t *);
struct module_info tcp_rinfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* TCP entry point during fallback */
struct qinit tcp_fallback_sock_winit = {
	(pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since the eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};
/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries.
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
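/*
 * For example (illustration only): with sizeof (int32_t) == 4,
 * ROUNDUP32(5) == 8 while ROUNDUP32(8) == 8 -- values are padded up to
 * the next 32-bit boundary, which is the TPI alignment assumed here.
 */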
/* Template for response to info request. */
struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};
/*
 * TCP tunables related declarations. Definitions are in tcp_tunables.c
 */
extern mod_prop_info_t tcp_propinfo_tbl[];
extern int tcp_propinfo_count;

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD. Otherwise, the per-connection behavior is
 * determined dynamically during tcp_set_destination(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;
/*
 * If the receive buffer size is changed, this function is called to update
 * the upper socket layer on the new delayed receive wake up threshold.
 */
void
tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
{
	uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;

	if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
		conn_t *connp = tcp->tcp_connp;
		struct sock_proto_props sopp;

		/*
		 * only increase rcvthresh up to default_threshold
		 */
		if (new_rcvthresh > default_threshold)
			new_rcvthresh = default_threshold;

		sopp.sopp_flags = SOCKOPT_RCVTHRESH;
		sopp.sopp_rcvthresh = new_rcvthresh;

		(*connp->conn_upcalls->su_set_proto_props)
		    (connp->conn_upper_handle, &sopp);
	}
}
/*
 * Figure out the value of the window scale option. Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
 */
void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}
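/*
 * Worked example (illustration only): for tcp_rwnd of 1 MB (1048576
 * bytes) the loop above shifts five times before the value fits the
 * 16-bit window field (1048576 >> 5 == 32768 <= TCP_MAXWIN), so the
 * advertised window scale comes out as 5 and the peer reconstructs the
 * window as (advertised value << 5).
 */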
/*
 * Remove cached/latched IPsec references.
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(connp->conn_flags & IPCL_TCPCONN);

	if (connp->conn_latch != NULL) {
		IPLATCH_REFRELE(connp->conn_latch);
		connp->conn_latch = NULL;
	}
	if (connp->conn_latch_in_policy != NULL) {
		IPPOL_REFRELE(connp->conn_latch_in_policy);
		connp->conn_latch_in_policy = NULL;
	}
	if (connp->conn_latch_in_action != NULL) {
		IPACT_REFRELE(connp->conn_latch_in_action);
		connp->conn_latch_in_action = NULL;
	}
	if (connp->conn_policy != NULL) {
		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
		connp->conn_policy = NULL;
	}
}
/*
 * Cleanup before placing on free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	mblk_t		*tcp_rsrv_mp;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);
	ixa_cleanup(connp->conn_ixa);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in global hash list. We
	 * know the walkers can't get to this conn because we
	 * had set CONDEMNED flag earlier and checked reference
	 * under conn_lock so walker won't pick it and when we
	 * go the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_rsrv_mp = tcp->tcp_rsrv_mp;

	if (connp->conn_cred != NULL) {
		crfree(connp->conn_cred);
		connp->conn_cred = NULL;
	}
	ipcl_conn_cleanup(connp);
	connp->conn_flags = IPCL_TCPCONN;

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack
	 * in which case it will cause the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	connp->conn_ixa->ixa_ipst = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;

	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_rsrv_mp = tcp_rsrv_mp;

	tcp->tcp_connp = connp;

	ASSERT(connp->conn_tcp == tcp);
	ASSERT(connp->conn_flags & IPCL_TCPCONN);
	connp->conn_state_flags = CONN_INCIPIENT;
	ASSERT(connp->conn_proto == IPPROTO_TCP);
	ASSERT(connp->conn_ref == 1);
}
/*
 * Adapt to the information, such as rtt and rtt_sd, provided from the
 * DCE and IRE maintained by IP.
 *
 * Checks for multicast and broadcast destination address.
 * Returns zero if ok; an errno on failure.
 *
 * Note that the MSS calculation here is based on the info given in
 * the DCE and IRE. We do not do any calculation based on TCP options. They
 * will be handled in tcp_input_data() when TCP knows which options to use.
 *
 * Note on how TCP gets its parameters for a connection.
 *
 * When a tcp_t structure is allocated, it gets all the default parameters.
 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
 * spipe, rpipe, ... from the route metrics. Route metric overrides the
 * default.
 *
 * An incoming SYN with a multicast or broadcast destination address is dropped
 * in ip_fanout_v4/v6.
 *
 * An incoming SYN with a multicast or broadcast source address is always
 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
 * conn_connect.
 * The same logic in tcp_set_destination also serves to
 * reject an attempt to connect to a broadcast or multicast (destination)
 * address.
 */
int
tcp_set_destination(tcp_t *tcp)
{
	uint32_t	mss;
	uint32_t	rto;
	int		error;
	iulp_t		uinfo;
	uint_t		flags;
	uint32_t	mss_max;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	flags = IPDF_LSO | IPDF_ZCOPY;
	/*
	 * Make sure we have a dce for the destination to avoid dce_ident
	 * contention for connected sockets.
	 */
	flags |= IPDF_UNIQUE_DCE;

	if (!tcps->tcps_ignore_path_mtu)
		connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;

	/* Use conn_lock to satisfy ASSERT; tcp is already serialized */
	mutex_enter(&connp->conn_lock);
	error = conn_connect(connp, &uinfo, flags);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	error = tcp_build_hdrs(tcp);
	if (error != 0)
		return (error);

	tcp->tcp_localnet = uinfo.iulp_localnet;

	if (uinfo.iulp_rtt != 0) {
		tcp->tcp_rtt_sa = uinfo.iulp_rtt;
		tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
		rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
		    tcps->tcps_rexmit_interval_extra +
		    (tcp->tcp_rtt_sa >> 5);

		TCP_SET_RTO(tcp, rto);
	}
	if (uinfo.iulp_ssthresh != 0)
		tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
	else
		tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	if (uinfo.iulp_spipe > 0) {
		connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
		    tcps->tcps_max_buf);
		if (tcps->tcps_snd_lowat_fraction != 0) {
			connp->conn_sndlowat = connp->conn_sndbuf /
			    tcps->tcps_snd_lowat_fraction;
		}
		(void) tcp_maxpsz_set(tcp, B_TRUE);
	}
	/*
	 * Note that up till now, the acceptor always inherits the receive
	 * window from the listener. But if there are metrics
	 * associated with a host, we should use those instead of
	 * inheriting it from the listener. Thus we need to pass this
	 * info back to the caller.
	 */
	if (uinfo.iulp_rpipe > 0) {
		tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
		    tcps->tcps_max_buf);
	}

	if (uinfo.iulp_rtomax > 0) {
		tcp->tcp_second_timer_threshold =
		    uinfo.iulp_rtomax;
	}

	/*
	 * Use the metric option settings, iulp_tstamp_ok and
	 * iulp_wscale_ok, only for active open. What this means
	 * is that if the other side uses timestamp or window
	 * scale option, TCP will also use those options. That
	 * is for passive open. If the application sets a
	 * large window, window scale is enabled regardless of
	 * the value in iulp_wscale_ok. This is the behavior
	 * since 2.6. So we keep it.
	 * The only case left in passive open processing is the
	 * check for SACK.
	 * For ECN, it should probably be like SACK. But the
	 * current value is binary, so we treat it like the other
	 * cases. The metric only controls active open. For passive
	 * open, the ndd param, tcp_ecn_permitted, controls the
	 * behaviour.
	 */
	if (!tcp_detached) {
		/*
		 * The if check means that the following can only
		 * be turned on by the metrics only IRE, but not off.
		 */
		if (uinfo.iulp_tstamp_ok)
			tcp->tcp_snd_ts_ok = B_TRUE;
		if (uinfo.iulp_wscale_ok)
			tcp->tcp_snd_ws_ok = B_TRUE;
		if (uinfo.iulp_sack == 2)
			tcp->tcp_snd_sack_ok = B_TRUE;
		if (uinfo.iulp_ecn_ok)
			tcp->tcp_ecn_ok = B_TRUE;
	} else {
		/*
		 * As above, the if check means that SACK can only be
		 * turned on by the metric only IRE.
		 */
		if (uinfo.iulp_sack > 0) {
			tcp->tcp_snd_sack_ok = B_TRUE;
		}
	}

	/*
	 * XXX Note that currently, iulp_mtu can be as small as 68
	 * because of PMTUd. So tcp_mss may go to negative if combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(uinfo.iulp_mtu != 0);
	mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;

	/* Sanity check for MSS value. */
	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Update the tcp connection with LSO capability.
	 */
	tcp_update_lso(tcp, connp->conn_ixa);

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
	 */
	tcp_iss_init(tcp);

	tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);

	/*
	 * Make sure that conn is not marked incipient
	 * for incoming connections. A blind
	 * removal of incipient flag is cheaper than
	 * check and removal.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);
	return (0);
}
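/*
 * Gloss on the RTO seeding above (our annotation, assuming the usual
 * Van Jacobson scaling used by this stack: tcp_rtt_sa keeps the
 * smoothed RTT scaled by 8 and tcp_rtt_sd keeps the scaled mean
 * deviation). Under that assumption,
 *
 *	rto = (sa >> 3) + sd + tcps_rexmit_interval_extra + (sa >> 5)
 *	    = SRTT + deviation term + configured slack + SRTT/4
 *
 * i.e. the first timeout is seeded slightly above the classic
 * SRTT + 4*mdev estimator, deliberately conservative for a connection
 * that has not yet taken any measurements of its own.
 */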
/*
 * tcp_clean_death / tcp_close_detached must not be called more than once
 * on a tcp. Thus every function that potentially calls tcp_clean_death
 * must check for the tcp state before calling tcp_clean_death.
 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
 * tcp_timer_handler, all check for the tcp state.
 */
void
tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy)
{
	tcp_t	*tcp = ((conn_t *)arg)->conn_tcp;

	freemsg(mp);
	if (tcp->tcp_state > TCPS_BOUND)
		(void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
}

/*
 * We are dying for some reason. Try to do it gracefully. (May be called
 * as writer.)
 *
 * Return -1 if the structure was not cleaned up (if the cleanup had to be
 * done by a service procedure).
 * TBD - Should the return value distinguish between the tcp_t being
 * freed and it being reinitialized?
 */
int
tcp_clean_death(tcp_t *tcp, int err)
{
	mblk_t	*mp;
	queue_t	*q;
	conn_t	*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
	}

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (TCP_IS_DETACHED(tcp)) {
		if (tcp->tcp_hard_binding) {
			/*
			 * It's an eager that we are dealing with. We close the
			 * eager but in case a conn_ind has already gone to the
			 * listener, let tcp_accept_finish() send a discon_ind
			 * to the listener and drop the last reference. If the
			 * listener doesn't even know about the eager i.e. the
			 * conn_ind hasn't gone up, blow away the eager and drop
			 * the last reference as well. If the conn_ind has gone
			 * up, state should be BOUND. tcp_accept_finish
			 * will figure out that the connection has received a
			 * RST and will send a DISCON_IND to the application.
			 */
			tcp_closei_local(tcp);
			if (!tcp->tcp_tconnind_started) {
				CONN_DEC_REF(connp);
			} else {
				tcp->tcp_state = TCPS_BOUND;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_CLOSED);
			}
		} else {
			tcp_close_detached(tcp);
		}
		return (0);
	}

	TCP_STAT(tcps, tcp_clean_death_nondetached);

	/*
	 * The connection is dead. Decrement listener connection counter if
	 * necessary.
	 */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	/*
	 * When a connection is moved to TIME_WAIT state, the connection
	 * counter is already decremented. So no need to decrement here
	 * again. See SET_TIME_WAIT() macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	q = connp->conn_rq;

	/* Trash all inbound data */
	if (!IPCL_IS_NONSTR(connp)) {
		ASSERT(q != NULL);
		flushq(q, FLUSHALL);
	}

	/*
	 * If we are at least part way open and there is error
	 * (err==0 implies no error)
	 * notify our client by a T_DISCON_IND.
	 */
	if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
		    !TCP_IS_SOCKET(tcp)) {
			/*
			 * Send M_FLUSH according to TPI. Because sockets will
			 * (and must) ignore FLUSHR we do that only for TPI
			 * endpoints and sockets in STREAMS mode.
			 */
			(void) putnextctl1(q, M_FLUSH, FLUSHR);
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_clean_death: discon err %d", err);
		}
		if (IPCL_IS_NONSTR(connp)) {
			/* Direct socket, use upcall */
			(*connp->conn_upcalls->su_disconnected)(
			    connp->conn_upper_handle, tcp->tcp_connid, err);
		} else {
			mp = mi_tpi_discon_ind(NULL, err, 0);
			if (mp != NULL) {
				putnext(q, mp);
			} else {
				if (connp->conn_debug) {
					(void) strlog(TCP_MOD_ID, 0, 1,
					    SL_ERROR|SL_TRACE,
					    "tcp_clean_death, sending M_ERROR");
				}
				(void) putnextctl1(q, M_ERROR, EPROTO);
			}
		}
		if (tcp->tcp_state <= TCPS_SYN_RCVD) {
			/* SYN_SENT or SYN_RCVD */
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
		} else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
			/* ESTABLISHED or CLOSE_WAIT */
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
		}
	}

	/*
	 * ESTABLISHED non-STREAMS eagers are not 'detached' because
	 * an upper handle is obtained when the SYN-ACK comes in. So it
	 * should receive the 'disconnected' upcall, but tcp_reinit should
	 * not be called since this is an eager.
	 */
	if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
		tcp_closei_local(tcp);
		tcp->tcp_state = TCPS_BOUND;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, TCPS_CLOSED);
		return (0);
	}

	tcp_reinit(tcp);
	if (IPCL_IS_NONSTR(connp))
		(void) tcp_do_unbind(connp);

	return (-1);
}
/*
 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
 * to expire, stop the wait and finish the close.
 */
void
tcp_stop_lingering(tcp_t *tcp)
{
	clock_t	delta = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcp->tcp_linger_tid = 0;
	if (tcp->tcp_state > TCPS_LISTEN) {
		tcp_acceptor_hash_remove(tcp);
		mutex_enter(&tcp->tcp_non_sq_lock);
		if (tcp->tcp_flow_stopped) {
			tcp_clrqfull(tcp);
		}
		mutex_exit(&tcp->tcp_non_sq_lock);

		if (tcp->tcp_timer_tid != 0) {
			delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
			tcp->tcp_timer_tid = 0;
		}
		/*
		 * Need to cancel those timers which will not be used when
		 * TCP is detached. This has to be done before the conn_wq
		 * is cleared.
		 */
		tcp_timers_stop(tcp);

		tcp->tcp_detached = B_TRUE;
		connp->conn_rq = NULL;
		connp->conn_wq = NULL;

		if (tcp->tcp_state == TCPS_TIME_WAIT) {
			tcp_time_wait_append(tcp);
			TCP_DBGSTAT(tcps, tcp_detach_time_wait);
			goto finish;
		}

		/*
		 * If delta is zero the timer event wasn't executed and was
		 * successfully canceled. In this case we need to restart it
		 * with the minimal delta possible.
		 */
		if (delta >= 0) {
			tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
			    delta ? delta : 1);
		}
	} else {
		tcp_closei_local(tcp);
		CONN_DEC_REF(connp);
	}
finish:
	tcp->tcp_detached = B_TRUE;
	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	/* Signal closing thread that it can complete close */
	mutex_enter(&tcp->tcp_closelock);
	tcp->tcp_closed = 1;
	cv_signal(&tcp->tcp_closecv);
	mutex_exit(&tcp->tcp_closelock);

	/* If we have an upper handle (socket), release it */
	if (IPCL_IS_NONSTR(connp)) {
		ASSERT(connp->conn_upper_handle != NULL);
		(*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
void
tcp_close_common(conn_t *connp, int flags)
{
	tcp_t		*tcp = connp->conn_tcp;
	mblk_t		*mp = &tcp->tcp_closemp;
	boolean_t	conn_ioctl_cleanup_reqd = B_FALSE;
	mblk_t		*bp;

	ASSERT(connp->conn_ref >= 2);

	/*
	 * Mark the conn as closing. ipsq_pending_mp_add will not
	 * add any mp to the pending mp list, after this conn has
	 * started closing.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CLOSING;
	if (connp->conn_oper_pending_ill != NULL)
		conn_ioctl_cleanup_reqd = B_TRUE;
	CONN_INC_REF_LOCKED(connp);
	mutex_exit(&connp->conn_lock);
	tcp->tcp_closeflags = (uint8_t)flags;
	ASSERT(connp->conn_ref >= 3);

	/*
	 * tcp_closemp_used is used below without any protection of a lock
	 * as we don't expect any one else to use it concurrently at this
	 * point otherwise it would be a major defect.
	 */

	if (mp->b_prev == NULL)
		tcp->tcp_closemp_used = B_TRUE;
	else
		cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
		    "connp %p tcp %p\n", (void *)connp, (void *)tcp);

	TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);

	/*
	 * Cleanup any queued ioctls here. This must be done before the wq/rq
	 * are re-written by tcp_close_output().
	 */
	if (conn_ioctl_cleanup_reqd)
		conn_ioctl_cleanup(connp);

	/*
	 * As CONN_CLOSING is set, no further ioctls should be passed down to
	 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
	 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
	 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
	 * was still in flight at the time, we wait for it here. See comments
	 * for CONN_INC_IOCTLREF in ip.h for details.
	 */
	mutex_enter(&connp->conn_lock);
	while (connp->conn_ioctlref > 0)
		cv_wait(&connp->conn_cv, &connp->conn_lock);
	ASSERT(connp->conn_ioctlref == 0);
	ASSERT(connp->conn_oper_pending_ill == NULL);
	mutex_exit(&connp->conn_lock);

	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);

	/*
	 * For non-STREAMS sockets, the normal case is that the conn makes
	 * an upcall when it's finally closed, so there is no need to wait
	 * in the protocol. But in case of SO_LINGER the thread sleeps here
	 * so it can properly deal with the thread being interrupted.
	 */
	if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
		goto nowait;

	mutex_enter(&tcp->tcp_closelock);
	while (!tcp->tcp_closed) {
		if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
			/*
			 * The cv_wait_sig() was interrupted. We now do the
			 * following:
			 *
			 * 1) If the endpoint was lingering, we allow this
			 * to be interrupted by cancelling the linger timeout
			 * and closing normally.
			 *
			 * 2) Revert to calling cv_wait()
			 *
			 * We revert to using cv_wait() to avoid an
			 * infinite loop which can occur if the calling
			 * thread is higher priority than the squeue worker
			 * thread and is bound to the same cpu.
			 */
			if (connp->conn_linger && connp->conn_lingertime > 0) {
				mutex_exit(&tcp->tcp_closelock);
				/* Entering squeue, bump ref count. */
				CONN_INC_REF(connp);
				bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
				SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
				    tcp_linger_interrupted, connp, NULL,
				    tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
				mutex_enter(&tcp->tcp_closelock);
			}
			break;
		}
	}
	while (!tcp->tcp_closed)
		cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
	mutex_exit(&tcp->tcp_closelock);

	/*
	 * In the case of listener streams that have eagers in the q or q0
	 * we wait for the eagers to drop their reference to us. conn_rq and
	 * conn_wq of the eagers point to our queues. By waiting for the
	 * refcnt to drop to 1, we are sure that the eagers have cleaned
	 * up their queue pointers and also dropped their references to us.
	 *
	 * For non-STREAMS sockets we do not have to wait here; the
	 * listener will instead make a su_closed upcall when the last
	 * reference is dropped.
	 */
	if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
		mutex_enter(&connp->conn_lock);
		while (connp->conn_ref != 1) {
			cv_wait(&connp->conn_cv, &connp->conn_lock);
		}
		mutex_exit(&connp->conn_lock);
	}

nowait:
	connp->conn_cpid = NOPID;
}
/*
 * Called by tcp_close() routine via squeue when lingering is
 * interrupted by a signal.
 */
/* ARGSUSED */
static void
tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;

	freeb(mp);
	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
		tcp->tcp_client_errno = EINTR;
	}
}
/*
 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
 * Some stream heads get upset if they see these later on as anything but NULL.
 */
void
tcp_close_mpp(mblk_t **mpp)
{
	mblk_t	*mp;

	if ((mp = *mpp) != NULL) {
		do {
			mp->b_next = NULL;
			mp->b_prev = NULL;
		} while ((mp = mp->b_cont) != NULL);

		freemsg(*mpp);
		*mpp = NULL;
	}
}
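/*
 * Usage note (illustration only): callers pass the address of a list
 * head so the head itself is cleared along with the chain, e.g.
 *
 *	tcp_close_mpp(&tcp->tcp_xmit_head);
 *
 * leaves tcp->tcp_xmit_head == NULL after every mblk on the b_cont
 * chain has had b_next and b_prev zeroed and the chain has been freed.
 */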
/* Do detached close. */
void
tcp_close_detached(tcp_t *tcp)
{
	if (tcp->tcp_fused)
		tcp_unfuse(tcp);

	/*
	 * Clustering code serializes TCP disconnect callbacks and
	 * cluster tcp list walks by blocking a TCP disconnect callback
	 * if a cluster tcp list walk is in progress. This ensures
	 * accurate accounting of TCPs in the cluster code even though
	 * the TCP list walk itself is not atomic.
	 */
	tcp_closei_local(tcp);
	CONN_DEC_REF(tcp->tcp_connp);
}
/*
 * The tcp_t is going away. Remove it from all lists and set it
 * to TCPS_CLOSED. The freeing up of memory is deferred until
 * tcp_inactive. This is needed since a thread in tcp_rput might have
 * done a CONN_INC_REF on this structure before it was removed from the
 * hashes.
 */
void
tcp_closei_local(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;

	if (!TCP_IS_SOCKET(tcp))
		tcp_acceptor_hash_remove(tcp);

	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	/*
	 * This can be called via tcp_time_wait_processing() if TCP gets a
	 * SYN with sequence number outside the TIME-WAIT connection's
	 * window. So we need to check for TIME-WAIT state here as the
	 * connection counter is already decremented. See SET_TIME_WAIT()
	 * macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	/*
	 * If we are an eager connection hanging off a listener that
	 * hasn't formally accepted the connection yet, get off his
	 * list and blow off any data that we have accumulated.
	 */
	if (tcp->tcp_listener != NULL) {
		tcp_t	*listener = tcp->tcp_listener;
		mutex_enter(&listener->tcp_eager_lock);
		/*
		 * tcp_tconnind_started == B_TRUE means that the
		 * conn_ind has already gone to listener. At
		 * this point, eager will be closed but we
		 * leave it in listeners eager list so that
		 * if listener decides to close without doing
		 * accept, we can clean this up. In tcp_tli_accept
		 * we take care of the case of accept on closed
		 * eager.
		 */
		if (!tcp->tcp_tconnind_started) {
			tcp_eager_unlink(tcp);
			mutex_exit(&listener->tcp_eager_lock);
			/*
			 * We don't want to have any pointers to the
			 * listener queue, after we have released our
			 * reference on the listener
			 */
			ASSERT(tcp->tcp_detached);
			connp->conn_rq = NULL;
			connp->conn_wq = NULL;
			CONN_DEC_REF(listener->tcp_connp);
		} else {
			mutex_exit(&listener->tcp_eager_lock);
		}
	}

	/* Stop all the timers */
	tcp_timers_stop(tcp);

	if (tcp->tcp_state == TCPS_LISTEN) {
		if (tcp->tcp_ip_addr_cache) {
			kmem_free((void *)tcp->tcp_ip_addr_cache,
			    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
			tcp->tcp_ip_addr_cache = NULL;
		}
	}

	/* Decrement listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped)
		tcp_clrqfull(tcp);
	mutex_exit(&tcp->tcp_non_sq_lock);

	tcp_bind_hash_remove(tcp);
	/*
	 * If the tcp_time_wait_collector (which runs outside the squeue)
	 * is trying to remove this tcp from the time wait list, we will
	 * block in tcp_time_wait_remove while trying to acquire the
	 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
	 * requires the ipcl_hash_remove to be ordered after the
	 * tcp_time_wait_remove for the refcnt checks to work correctly.
	 */
	if (tcp->tcp_state == TCPS_TIME_WAIT)
		(void) tcp_time_wait_remove(tcp, NULL);
	CL_INET_DISCONNECT(connp);
	ipcl_hash_remove(connp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_CLOSED;
	/* Need to probe before ixa_cleanup() is called */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);
	ixa_cleanup(connp->conn_ixa);

	/*
	 * Mark the conn as CONDEMNED
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CONDEMNED;
	mutex_exit(&connp->conn_lock);

	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	tcp_ipsec_cleanup(tcp);
}
/*
 * tcp is dying (called from ipcl_conn_destroy and error cases).
 * Free the tcp_t in either case.
 */
void
tcp_free(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(tcp != NULL);
	ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);

	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	tcp_close_mpp(&tcp->tcp_reass_head);
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
	}
	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
	}

	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}

	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/*
	 * The following is really blowing away a union.
	 * It happens to have exactly two members of identical size,
	 * so the following code is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	/*
	 * If this is a non-STREAM socket still holding on to an upper
	 * handle, release it. As a result of fallback we might also see
	 * STREAMS based conns with upper handles, in which case there is
	 * nothing to do other than clearing the field.
	 */
	if (connp->conn_upper_handle != NULL) {
		if (IPCL_IS_NONSTR(connp)) {
			(*connp->conn_upcalls->su_closed)(
			    connp->conn_upper_handle);
			tcp->tcp_detached = B_TRUE;
		}
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
/*
 * tcp_get_conn/tcp_free_conn
 *
 * tcp_get_conn is used to get a clean tcp connection structure.
 * It tries to reuse the connections put on the freelist by the
 * time_wait_collector, failing which it goes to kmem_cache. This
 * way has two benefits compared to just allocating from and
 * freeing to kmem_cache.
 * 1) The time_wait_collector can free (which includes the cleanup)
 * outside the squeue. So when the interrupt comes, we have a clean
 * connection sitting in the freelist. Obviously, this buys us
 * performance.
 *
 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
 * has multiple disadvantages - tying up the squeue during alloc.
 * But allocating the conn/tcp in IP land is also not the best since
 * we can't check the 'q' and 'q0' which are protected by squeue and
 * blindly allocate memory which might have to be freed here if we are
 * not allowed to accept the connection. By using the freelist and
 * putting the conn/tcp back in freelist, we don't pay a penalty for
 * allocating memory without checking 'q/q0' and freeing it if we can't
 * accept the connection.
 *
 * Care should be taken to put the conn back in the same squeue's freelist
 * from which it was allocated. Best results are obtained if conn is
 * allocated from listener's squeue and freed to the same. Time wait
 * collector will free up the freelist if the connection ends up sitting
 * there for too long.
 */
void *
tcp_get_conn(void *arg, tcp_stack_t *tcps)
{
	tcp_t			*tcp = NULL;
	conn_t			*connp = NULL;
	squeue_t		*sqp = (squeue_t *)arg;
	tcp_squeue_priv_t	*tcp_time_wait;
	netstack_t		*ns;
	mblk_t			*tcp_rsrv_mp = NULL;

	tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp = tcp_time_wait->tcp_free_list;
	ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
	if (tcp != NULL) {
		tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
		tcp_time_wait->tcp_free_list_cnt--;
		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
		tcp->tcp_time_wait_next = NULL;
		connp = tcp->tcp_connp;
		connp->conn_flags |= IPCL_REUSED;

		ASSERT(tcp->tcp_tcps == NULL);
		ASSERT(connp->conn_netstack == NULL);
		ASSERT(tcp->tcp_rsrv_mp != NULL);
		ns = tcps->tcps_netstack;
		netstack_hold(ns);
		connp->conn_netstack = ns;
		connp->conn_ixa->ixa_ipst = ns->netstack_ip;
		tcp->tcp_tcps = tcps;
		ipcl_globalhash_insert(connp);

		connp->conn_ixa->ixa_notify_cookie = tcp;
		ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
		connp->conn_recv = tcp_input_data;
		ASSERT(connp->conn_recvicmp == tcp_icmp_input);
		ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
		return ((void *)connp);
	}
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
	/*
	 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
	 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
	 */
	tcp_rsrv_mp = allocb(0, BPRI_HI);
	if (tcp_rsrv_mp == NULL)
		return (NULL);

	if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
	    tcps->tcps_netstack)) == NULL) {
		freeb(tcp_rsrv_mp);
		return (NULL);
	}

	tcp = connp->conn_tcp;
	tcp->tcp_rsrv_mp = tcp_rsrv_mp;
	mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);

	tcp->tcp_tcps = tcps;

	connp->conn_recv = tcp_input_data;
	connp->conn_recvicmp = tcp_icmp_input;
	connp->conn_verifyicmp = tcp_verifyicmp;

	/*
	 * Register tcp_notify to listen to capability changes detected by IP.
	 * This upcall is made in the context of the call to conn_ip_output
	 * thus it is inside the squeue.
	 */
	connp->conn_ixa->ixa_notify = tcp_notify;
	connp->conn_ixa->ixa_notify_cookie = tcp;

	return ((void *)connp);
}
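/*
 * Hedged usage sketch (not original code): a caller such as
 * tcp_input_listener() obtains a pre-cleaned conn by passing in the
 * squeue it is executing on (the variable names below are ours):
 *
 *	conn_t *econnp = (conn_t *)tcp_get_conn(sqp, tcps);
 *	if (econnp == NULL)
 *		return;		// drop the SYN; the listener is unharmed
 *
 * The squeue argument is what selects the per-squeue freelist described
 * in the block comment above, so the conn is later freed back to the
 * same squeue's list.
 */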
/*
 * Handle connect to IPv4 destinations, including connections for AF_INET6
 * sockets connecting to IPv4 mapped IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
    uint_t srcid)
{
	ipaddr_t	dstaddr = *dstaddrp;
	uint16_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_ipversion == IPV4_VERSION);

	/* Check for attempt to connect to INADDR_ANY */
	if (dstaddr == INADDR_ANY) {
		/*
		 * SunOS 4.x and 4.3 BSD allow an application
		 * to connect a TCP socket to INADDR_ANY.
		 * When they do this, the kernel picks the
		 * address of one interface and uses it
		 * instead. The kernel usually ends up
		 * picking the address of the loopback
		 * interface. This is an undocumented feature.
		 * However, we provide the same thing here
		 * in order to have source and binary
		 * compatibility with SunOS 4.x.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		dstaddr = htonl(INADDR_LOOPBACK);
		*dstaddrp = dstaddr;
	}

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), tcps->tcps_netstack);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v4(connp));
}
/*
 * Handle connect to IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
    uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
{
	uint16_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_family == AF_INET6);

	/*
	 * If we're here, it means that the destination address is a native
	 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
	 * reason why it might not be IPv6 is if the socket was bound to an
	 * IPv4-mapped IPv6 address.
	 */
	if (connp->conn_ipversion != IPV6_VERSION)
		return (-TBADADDR);

	/*
	 * Interpret a zero destination to mean loopback.
	 * Update the T_CONN_REQ (sin/sin6) since it is used to
	 * generate the T_CONN_CON.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
		*dstaddrp = ipv6_loopback;

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), tcps->tcps_netstack);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	/*
	 * Take care of the scope_id now.
	 */
	if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
		connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
		connp->conn_ixa->ixa_scopeid = scope_id;
	} else {
		connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
	}

	connp->conn_flowinfo = flowinfo;
	connp->conn_faddr_v6 = *dstaddrp;
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v6(connp));
}
/*
 * Note that unlike other functions this returns a positive tli error
 * when it fails; it never returns an errno.
 */
static int
tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
{
	conn_t		*lconnp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
	 * when the stream is in BOUND state. Do not send a reset,
	 * since the destination IP address is not valid, and it can
	 * be the initialized value of all zeros (broadcast address).
	 */
	if (tcp->tcp_state <= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_disconnect: bad state, %d", tcp->tcp_state);
		}
		return (TOUTSTATE);
	} else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
		TCPS_CONN_DEC(tcps);
	}

	if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {

		/*
		 * According to TPI, for non-listeners, ignore seqnum
		 * and disconnect.
		 * Following interpretation of -1 seqnum is historical
		 * and implied TPI ? (TPI only states that for T_CONN_IND,
		 * a valid seqnum should not be -1).
		 *
		 *	-1 means disconnect everything
		 *	regardless even on a listener.
		 */

		int old_state = tcp->tcp_state;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		/*
		 * The connection can't be on the tcp_time_wait_head list
		 * since it is not detached.
		 */
		ASSERT(tcp->tcp_time_wait_next == NULL);
		ASSERT(tcp->tcp_time_wait_prev == NULL);
		ASSERT(tcp->tcp_time_wait_expire == 0);

		/*
		 * If it used to be a listener, check to make sure no one else
		 * has taken the port before switching back to LISTEN state.
		 */
		if (connp->conn_ipversion == IPV4_VERSION) {
			lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
			    connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
		} else {
			uint_t ifindex = 0;

			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
				ifindex = connp->conn_ixa->ixa_scopeid;

			/* Allow conn_bound_if listeners? */
			lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
			    &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
			    ipst);
		}
		if (tcp->tcp_conn_req_max && lconnp == NULL) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void, NULL,
			    tcp_t *, tcp, void, NULL, int32_t, old_state);
		} else if (old_state > TCPS_BOUND) {
			tcp->tcp_conn_req_max = 0;
			tcp->tcp_state = TCPS_BOUND;
			DTRACE_TCP6(state__change, void, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void, NULL,
			    tcp_t *, tcp, void, NULL, int32_t, old_state);

			/*
			 * If this end point is not going to become a listener,
			 * decrement the listener connection count if
			 * necessary. Note that we do not do this if it is
			 * going to be a listener (the above if case) since
			 * then it may remove the counter struct.
			 */
			if (tcp->tcp_listen_cnt != NULL)
				TCP_DECR_LISTEN_CNT(tcp);
		}
		if (lconnp != NULL)
			CONN_DEC_REF(lconnp);
		switch (old_state) {
		case TCPS_SYN_SENT:
		case TCPS_SYN_RCVD:
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
			break;
		case TCPS_ESTABLISHED:
		case TCPS_CLOSE_WAIT:
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
			break;
		}

		mutex_enter(&tcp->tcp_eager_lock);
		if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
		    (tcp->tcp_conn_req_cnt_q != 0)) {
			tcp_eager_cleanup(tcp, 0);
		}
		mutex_exit(&tcp->tcp_eager_lock);

		tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
		    tcp->tcp_rnxt, TH_RST | TH_ACK);

		tcp_reinit(tcp);

		return (0);
	} else if (!tcp_eager_blowoff(tcp, seqnum)) {
		return (TBADSEQ);
	}
	return (0);
}
/*
 * Our client hereby directs us to reject the connection request
 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
 * of sending the appropriate RST, not an ICMP error.
 */
static void
tcp_disconnect(tcp_t *tcp, mblk_t *mp)
{
	t_scalar_t seqnum;
	int	error;
	conn_t	*connp = tcp->tcp_connp;

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}
	seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
	error = tcp_disconnect_common(tcp, seqnum);
	if (error != 0) {
		tcp_err_ack(tcp, mp, error, 0);
	} else {
		if (tcp->tcp_state >= TCPS_ESTABLISHED) {
			/* Send M_FLUSH according to TPI */
			(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
		}
		mp = mi_tpi_ok_ack_alloc(mp);
		if (mp != NULL)
			putnext(connp->conn_rq, mp);
	}
}
/*
 * Handle reinitialization of a tcp structure.
 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
 */
static void
tcp_reinit(tcp_t *tcp)
{
	mblk_t		*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	int32_t		oldstate;

	/* tcp_reinit should never be called for detached tcp_t's */
	ASSERT(tcp->tcp_listener == NULL);
	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	/* Cancel outstanding timers */
	tcp_timers_stop(tcp);

	/*
	 * Reset everything in the state vector, after updating global
	 * MIB data from instance counters.
	 */
	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	if (tcp->tcp_snd_zcopy_aware)
		tcp_zcopy_notify(tcp);
	tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
	tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
		tcp_clrqfull(tcp);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	tcp_close_mpp(&tcp->tcp_reass_head);
	tcp->tcp_reass_tail = NULL;
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
	}
	tcp->tcp_rcv_last_tail = NULL;

	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mp = NULL;
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mark_mp = NULL;
	}
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	/*
	 * Following is a union with two members which are
	 * identical types and size so the following cleanup
	 * is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	CL_INET_DISCONNECT(connp);

	/*
	 * The connection can't be on the tcp_time_wait_head list
	 * since it is not detached.
	 */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	/*
	 * Reset/preserve other values
	 */
	tcp_reinit_values(tcp);
	ipcl_hash_remove(connp);
	/* Note that ixa_cred gets cleared in ixa_cleanup */
	ixa_cleanup(connp->conn_ixa);
	tcp_ipsec_cleanup(tcp);

	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_saddr_v6 = connp->conn_bound_addr_v6;

	oldstate = tcp->tcp_state;

	if (tcp->tcp_conn_req_max != 0) {
		/*
		 * This is the case when a TLI program uses the same
		 * transport end point to accept a connection. This
		 * makes the TCP both a listener and acceptor. When
		 * this connection is closed, we need to set the state
		 * back to TCPS_LISTEN. Make sure that the eager list
		 * is reinitialized.
		 *
		 * Note that this stream is still bound to the four
		 * tuples of the previous connection in IP. If a new
		 * SYN with different foreign address comes in, IP will
		 * not find it and will send it to the global queue. In
		 * the global queue, TCP will do a tcp_lookup_listener()
		 * to find this stream. This works because this stream
		 * is only removed from connected hash.
		 */
		tcp->tcp_state = TCPS_LISTEN;
		tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
		tcp->tcp_eager_next_drop_q0 = tcp;
		tcp->tcp_eager_prev_drop_q0 = tcp;
		/*
		 * Initially set conn_recv to tcp_input_listener_unbound to try
		 * to pick a good squeue for the listener when the first SYN
		 * arrives. tcp_input_listener_unbound sets it to
		 * tcp_input_listener on that first SYN.
		 */
		connp->conn_recv = tcp_input_listener_unbound;

		connp->conn_proto = IPPROTO_TCP;
		connp->conn_faddr_v6 = ipv6_all_zeros;
		connp->conn_fport = 0;

		(void) ipcl_bind_insert(connp);
	} else {
		tcp->tcp_state = TCPS_BOUND;
	}

	/*
	 * Initialize to default values
	 */
	tcp_init_values(tcp, NULL);

	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	ASSERT(tcp->tcp_ptpbhn != NULL);
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
}
/*
 * Force values to zero that need to be zero.
 * Do not touch values associated with the BOUND or LISTEN state
 * since the connection will end up in that state after the reinit.
 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
 * structure!
 */
static void
tcp_reinit_values(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

#define	DONTCARE(x)	((x) = (x))
#define	PRESERVE(x)	((x) = (x))
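	/*
	 * Both macros expand to a self-assignment: they generate no useful
	 * code (the compiler elides it), but they force every tcp_t field to
	 * be mentioned exactly once below, documenting whether the field is
	 * deliberately kept (PRESERVE) or deliberately ignored (DONTCARE).
	 * For example, PRESERVE(tcp->tcp_bind_hash) compiles to
	 * ((tcp->tcp_bind_hash) = (tcp->tcp_bind_hash)).
	 */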
	PRESERVE(tcp->tcp_bind_hash_port);
	PRESERVE(tcp->tcp_bind_hash);
	PRESERVE(tcp->tcp_ptpbhn);
	PRESERVE(tcp->tcp_acceptor_hash);
	PRESERVE(tcp->tcp_ptpahn);

	/* Should be ASSERT NULL on these with new code! */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);
	PRESERVE(tcp->tcp_state);
	PRESERVE(connp->conn_rq);
	PRESERVE(connp->conn_wq);

	ASSERT(tcp->tcp_xmit_head == NULL);
	ASSERT(tcp->tcp_xmit_last == NULL);
	ASSERT(tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_xmit_tail == NULL);
	ASSERT(tcp->tcp_xmit_tail_unsent == 0);

	tcp->tcp_snxt = 0;			/* Displayed in mib */
	tcp->tcp_suna = 0;			/* Displayed in mib */

	DONTCARE(tcp->tcp_cwnd);	/* Init in tcp_process_options */

	ASSERT(tcp->tcp_ibsegs == 0);
	ASSERT(tcp->tcp_obsegs == 0);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	DONTCARE(tcp->tcp_naglim);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ipha);
	DONTCARE(tcp->tcp_ip6h);
	DONTCARE(tcp->tcp_tcpha);
	tcp->tcp_valid_bits = 0;

	DONTCARE(tcp->tcp_timer_backoff);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_last_recv_time);	/* Init in tcp_init_values */
	tcp->tcp_last_rcv_lbolt = 0;

	tcp->tcp_init_cwnd = 0;

	tcp->tcp_urp_last_valid = 0;
	tcp->tcp_hard_binding = 0;

	tcp->tcp_fin_acked = 0;
	tcp->tcp_fin_rcvd = 0;
	tcp->tcp_fin_sent = 0;
	tcp->tcp_ordrel_done = 0;

	tcp->tcp_detached = 0;

	tcp->tcp_snd_ws_ok = B_FALSE;
	tcp->tcp_snd_ts_ok = B_FALSE;
	tcp->tcp_zero_win_probe = 0;

	tcp->tcp_loopback = 0;
	tcp->tcp_localnet = 0;
	tcp->tcp_syn_defense = 0;
	tcp->tcp_set_timer = 0;

	tcp->tcp_active_open = 0;
	tcp->tcp_rexmit = B_FALSE;
	tcp->tcp_xmit_zc_clean = B_FALSE;

	tcp->tcp_snd_sack_ok = B_FALSE;
	tcp->tcp_hwcksum = B_FALSE;

	DONTCARE(tcp->tcp_maxpsz_multiplier);	/* Init in tcp_init_values */

	tcp->tcp_conn_def_q0 = 0;
	tcp->tcp_ip_forward_progress = B_FALSE;
	tcp->tcp_ecn_ok = B_FALSE;

	tcp->tcp_cwr = B_FALSE;
	tcp->tcp_ecn_echo_on = B_FALSE;
	tcp->tcp_is_wnd_shrnk = B_FALSE;

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	tcp->tcp_rcv_ws = 0;
	tcp->tcp_snd_ws = 0;
	tcp->tcp_ts_recent = 0;
	tcp->tcp_rnxt = 0;			/* Displayed in mib */
	DONTCARE(tcp->tcp_rwnd);		/* Set in tcp_reinit() */
	tcp->tcp_initial_pmtu = 0;

	ASSERT(tcp->tcp_reass_head == NULL);
	ASSERT(tcp->tcp_reass_tail == NULL);

	tcp->tcp_cwnd_cnt = 0;

	ASSERT(tcp->tcp_rcv_list == NULL);
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);

	DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
	DONTCARE(tcp->tcp_cwnd_max);		/* Init in tcp_init_values */

	tcp->tcp_rto = 0;			/* Displayed in MIB */
	DONTCARE(tcp->tcp_rtt_sa);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_rtt_sd);		/* Init in tcp_init_values */
	tcp->tcp_rtt_update = 0;

	DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
	DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */

	tcp->tcp_rack = 0;			/* Displayed in mib */
	tcp->tcp_rack_cnt = 0;
	tcp->tcp_rack_cur_max = 0;
	tcp->tcp_rack_abs_max = 0;

	tcp->tcp_max_swnd = 0;

	ASSERT(tcp->tcp_listener == NULL);

	DONTCARE(tcp->tcp_irs);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_iss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_fss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_urg);			/* tcp_valid_bits cleared */

	ASSERT(tcp->tcp_conn_req_cnt_q == 0);
	ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
	PRESERVE(tcp->tcp_conn_req_max);
	PRESERVE(tcp->tcp_conn_req_seqnum);

	DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */

	DONTCARE(tcp->tcp_urp_last);	/* tcp_urp_last_valid is cleared */
	ASSERT(tcp->tcp_urp_mp == NULL);
	ASSERT(tcp->tcp_urp_mark_mp == NULL);
	ASSERT(tcp->tcp_fused_sigurg_mp == NULL);

	ASSERT(tcp->tcp_eager_next_q == NULL);
	ASSERT(tcp->tcp_eager_last_q == NULL);
	ASSERT((tcp->tcp_eager_next_q0 == NULL &&
	    tcp->tcp_eager_prev_q0 == NULL) ||
	    tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);

	ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
	    tcp->tcp_eager_prev_drop_q0 == NULL) ||
	    tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);

	DONTCARE(tcp->tcp_ka_rinterval);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ka_abort_thres);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ka_cnt);		/* Init in tcp_init_values */

	tcp->tcp_client_errno = 0;

	DONTCARE(connp->conn_sum);		/* Init in tcp_init_values */

	connp->conn_faddr_v6 = ipv6_all_zeros;	/* Displayed in MIB */

	PRESERVE(connp->conn_bound_addr_v6);
	tcp->tcp_last_sent_len = 0;
	tcp->tcp_dupack_cnt = 0;

	connp->conn_fport = 0;			/* Displayed in MIB */
	PRESERVE(connp->conn_lport);

	PRESERVE(tcp->tcp_acceptor_lockp);

	ASSERT(tcp->tcp_ordrel_mp == NULL);
	PRESERVE(tcp->tcp_acceptor_id);
	DONTCARE(tcp->tcp_ipsec_overhead);

	PRESERVE(connp->conn_family);
	/* Remove any remnants of mapped address binding */
	if (connp->conn_family == AF_INET6) {
		connp->conn_ipversion = IPV6_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	}

	connp->conn_bound_if = 0;
	connp->conn_recv_ancillary.crb_all = 0;
	tcp->tcp_recvifindex = 0;
	tcp->tcp_recvhops = 0;
	tcp->tcp_closed = 0;
	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	tcp->tcp_lso = B_FALSE;

	tcp->tcp_in_ack_unsent = 0;
	tcp->tcp_cork = B_FALSE;
	tcp->tcp_tconnind_started = B_FALSE;

	PRESERVE(tcp->tcp_squeue_bytes);

	tcp->tcp_closemp_used = B_FALSE;

	PRESERVE(tcp->tcp_rsrv_mp);
	PRESERVE(tcp->tcp_rsrv_mp_lock);

	DONTCARE(tcp->tcmp_stk[0]);

	PRESERVE(tcp->tcp_connid);

	ASSERT(tcp->tcp_listen_cnt == NULL);
	ASSERT(tcp->tcp_reass_tid == 0);
}
/*
 * Initialize the various fields in tcp_t.  If parent (the listener) is
 * non-NULL, certain values will be inherited from it.
 */
static void
tcp_init_values(tcp_t *tcp, tcp_t *parent)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	clock_t		rto;

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (parent == NULL) {
		tcp->tcp_naglim = tcps->tcps_naglim_def;

		tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
		tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
		tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;

		tcp->tcp_first_ctimer_threshold =
		    tcps->tcps_ip_notify_cinterval;
		tcp->tcp_second_ctimer_threshold =
		    tcps->tcps_ip_abort_cinterval;
		tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
		tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;

		tcp->tcp_fin_wait_2_flush_interval =
		    tcps->tcps_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
		tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
		tcp->tcp_ka_cnt = 0;
		tcp->tcp_ka_rinterval = 0;

		/*
		 * Default value of tcp_init_cwnd is 0, so no need to set here
		 * if parent is NULL.  But we need to inherit it from parent.
		 */
	} else {
		/* Inherit various TCP parameters from the parent. */
		tcp->tcp_naglim = parent->tcp_naglim;

		tcp->tcp_rto_initial = parent->tcp_rto_initial;
		tcp->tcp_rto_min = parent->tcp_rto_min;
		tcp->tcp_rto_max = parent->tcp_rto_max;

		tcp->tcp_first_ctimer_threshold =
		    parent->tcp_first_ctimer_threshold;
		tcp->tcp_second_ctimer_threshold =
		    parent->tcp_second_ctimer_threshold;
		tcp->tcp_first_timer_threshold =
		    parent->tcp_first_timer_threshold;
		tcp->tcp_second_timer_threshold =
		    parent->tcp_second_timer_threshold;

		tcp->tcp_fin_wait_2_flush_interval =
		    parent->tcp_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = parent->tcp_ka_interval;
		tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;
		tcp->tcp_ka_cnt = parent->tcp_ka_cnt;
		tcp->tcp_ka_rinterval = parent->tcp_ka_rinterval;

		tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
	}

	/*
	 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
	 * will be close to tcp_rexmit_interval_initial.  By doing this, we
	 * allow the algorithm to adjust slowly to large fluctuations of RTT
	 * during first few transmissions of a connection as seen in slow
	 * links.
	 */
	tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
	tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
	rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
	    tcps->tcps_conn_grace_period;
	TCP_SET_RTO(tcp, rto);

	tcp->tcp_timer_backoff = 0;
	tcp->tcp_ms_we_have_waited = 0;
	tcp->tcp_last_recv_time = ddi_get_lbolt();
	tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
	tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	tcp->tcp_snd_burst = TCP_CWND_INFINITE;

	tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;

	/* NOTE:  ISS is now set in tcp_set_destination(). */

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	/* We rebuild the header template on the next connect/conn_request */

	connp->conn_mlp_type = mlptSingle;

	/*
	 * Init the window scale to the max so tcp_rwnd_set() won't pare
	 * down tcp_rwnd. tcp_set_destination() will set the right value later.
	 */
	tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
	tcp->tcp_rwnd = connp->conn_rcvbuf;

	tcp->tcp_cork = B_FALSE;
	/*
	 * Init the tcp_debug option if it wasn't already set.  This value
	 * determines whether TCP calls strlog() to print out debug messages.
	 * Doing this initialization here means that this value is not
	 * inherited through tcp_reinit().
	 */
	if (!connp->conn_debug)
		connp->conn_debug = tcps->tcps_dbg;
}
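/*
 * Worked example of the RTO seeding above (illustrative values only): with
 * tcp_rto_initial of 1000 ms, and assuming tcps_rexmit_interval_extra and
 * tcps_conn_grace_period are both 0,
 *
 *	tcp_rtt_sa = 1000 << 2 = 4000	(smoothed RTT, scaled by 8)
 *	tcp_rtt_sd = 1000 >> 1 = 500	(RTT deviation, scaled by 4)
 *	rto = (4000 >> 3) + 500 + 0 + (4000 >> 5) + 0
 *	    = 500 + 500 + 125 = 1125 ms
 *
 * which is close to the configured initial interval, as the comment above
 * intends, while leaving room for the estimator to adapt.
 */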
/*
 * Update the TCP connection according to change of PMTU.
 *
 * Path MTU might have changed by either increase or decrease, so need to
 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
 * or negative MSS, since tcp_mss_set() will do it.
 */
void
tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
{
	uint32_t	pmtu;
	int32_t		mss;
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	iaflags_t	ixaflags;

	if (tcp->tcp_tcps->tcps_ignore_path_mtu)
		return;

	if (tcp->tcp_state < TCPS_ESTABLISHED)
		return;

	/*
	 * Always call ip_get_pmtu() to make sure that IP has updated
	 * ixa_flags properly.
	 */
	pmtu = ip_get_pmtu(ixa);
	ixaflags = ixa->ixa_flags;

	/*
	 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
	 * IPsec overhead if applied. Make sure to use the most recent
	 * IPsec information.
	 */
	mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);

	/*
	 * Nothing to change, so just return.
	 */
	if (mss == tcp->tcp_mss)
		return;

	/*
	 * Currently, for ICMP errors, only PMTU decrease is handled.
	 */
	if (mss > tcp->tcp_mss && decrease_only)
		return;

	DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);

	/*
	 * Update ixa_fragsize and ixa_pmtu.
	 */
	ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;

	/*
	 * Adjust MSS and all relevant variables.
	 */
	tcp_mss_set(tcp, mss);

	/*
	 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
	 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
	 * has a (potentially different) min size we do the same. Make sure to
	 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
	 * fragment the packet.
	 *
	 * LSO over IPv6 can not be fragmented. So need to disable LSO
	 * when IPv6 fragmentation is needed.
	 */
	if (mss < tcp->tcp_tcps->tcps_mss_min)
		ixaflags |= IXAF_PMTU_TOO_SMALL;

	if (ixaflags & IXAF_PMTU_TOO_SMALL)
		ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);

	if ((connp->conn_ipversion == IPV4_VERSION) &&
	    !(ixaflags & IXAF_PMTU_IPV4_DF)) {
		tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
	}
	ixa->ixa_flags = ixaflags;
}
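/*
 * Illustrative example of the MSS derivation above: for a plain IPv4
 * connection with a 1500-byte PMTU, conn_ht_iphc_len is 40 (20-byte IPv4
 * header plus 20-byte TCP header) and conn_ipsec_length() is 0, so
 * mss = 1500 - 40 - 0 = 1460. Any IP options, IPv6 extension headers, or
 * IPsec overhead shrink the MSS accordingly.
 */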
int
tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
{
	conn_t	*connp = tcp->tcp_connp;
	queue_t	*q = connp->conn_rq;
	int32_t	mss = tcp->tcp_mss;
	int	maxpsz;

	if (TCP_IS_DETACHED(tcp))
		return (mss);
	if (tcp->tcp_fused) {
		maxpsz = tcp_fuse_maxpsz(tcp);
		mss = INFPSZ;
	} else if (tcp->tcp_maxpsz_multiplier == 0) {
		/*
		 * Set the sd_qn_maxpsz according to the socket send buffer
		 * size, and sd_maxblk to INFPSZ (-1).  This will essentially
		 * instruct the stream head to copyin user data into contiguous
		 * kernel-allocated buffers without breaking it up into smaller
		 * chunks.  We round up the buffer size to the nearest SMSS.
		 */
		maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
		mss = INFPSZ;
	} else {
		/*
		 * Set sd_qn_maxpsz to approx half the (receivers) buffer
		 * (and a multiple of the mss).  This instructs the stream
		 * head to break down larger than SMSS writes into SMSS-
		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
		 */
		maxpsz = tcp->tcp_maxpsz_multiplier * mss;
		if (maxpsz > connp->conn_sndbuf / 2) {
			maxpsz = connp->conn_sndbuf / 2;
			/* Round up to nearest mss */
			maxpsz = MSS_ROUNDUP(maxpsz, mss);
		}
	}

	(void) proto_set_maxpsz(q, connp, maxpsz);
	if (!(IPCL_IS_NONSTR(connp)))
		connp->conn_wq->q_maxpsz = maxpsz;
	if (set_maxblk)
		(void) proto_set_tx_maxblk(q, connp, mss);
	return (mss);
}
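/*
 * Illustrative example of the non-fused, non-zero multiplier case above:
 * with an mss of 1460 and tcp_maxpsz_multiplier of 10, maxpsz starts at
 * 14600.  If conn_sndbuf is 16384, the conn_sndbuf / 2 = 8192 cap applies,
 * and MSS_ROUNDUP(8192, 1460) rounds up to 8760 (6 * 1460), so the stream
 * head hands TCP at most six SMSS-sized mblks at a time.
 */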
/* For /dev/tcp aka AF_INET open */
int
tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
}

/* For /dev/tcp6 aka AF_INET6 open */
int
tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
}
conn_t *
tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
    int *errorp)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp;
	zoneid_t	zoneid;
	tcp_stack_t	*tcps;
	squeue_t	*sqp;

	ASSERT(errorp != NULL);
	/*
	 * Find the proper zoneid and netstack.
	 */
	/*
	 * Special case for install: miniroot needs to be able to
	 * access files via NFS as though it were always in the
	 * global zone.
	 */
	if (credp == kcred && nfs_global_client_only != 0) {
		zoneid = GLOBAL_ZONEID;
		tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
		    netstack_tcp;
		ASSERT(tcps != NULL);
	} else {
		netstack_t *ns;
		int err;

		if ((err = secpolicy_basic_net_access(credp)) != 0) {
			*errorp = err;
			return (NULL);
		}

		ns = netstack_find_by_cred(credp);
		ASSERT(ns != NULL);
		tcps = ns->netstack_tcp;
		ASSERT(tcps != NULL);

		/*
		 * For exclusive stacks we set the zoneid to zero
		 * to make TCP operate as if in the global zone.
		 */
		if (tcps->tcps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID)
			zoneid = GLOBAL_ZONEID;
		else
			zoneid = crgetzoneid(credp);
	}

	sqp = IP_SQUEUE_GET((uint_t)gethrtime());
	connp = (conn_t *)tcp_get_conn(sqp, tcps);
	/*
	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
	 * so we drop it by one.
	 */
	netstack_rele(tcps->tcps_netstack);
	if (connp == NULL) {
		*errorp = ENOSR;
		return (NULL);
	}
	ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);

	connp->conn_sqp = sqp;
	connp->conn_initial_sqp = connp->conn_sqp;
	connp->conn_ixa->ixa_sqp = connp->conn_sqp;
	tcp = connp->conn_tcp;

	/*
	 * Besides asking IP to set the checksum for us, have conn_ip_output
	 * to do the following checks when necessary:
	 *
	 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
	 * IXAF_VERIFY_PMTU: verify PMTU changes
	 * IXAF_VERIFY_LSO: verify LSO capability changes
	 */
	connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;

	if (!tcps->tcps_dev_flow_ctl)
		connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	if (isv6) {
		connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_family = AF_INET6;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
		connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		connp->conn_family = AF_INET;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
		connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
	}
	connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;

	crhold(credp);
	connp->conn_cred = credp;
	connp->conn_cpid = curproc->p_pid;
	connp->conn_open_time = ddi_get_lbolt64();

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = credp;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;

	connp->conn_zoneid = zoneid;
	/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
	connp->conn_ixa->ixa_zoneid = zoneid;
	connp->conn_mlp_type = mlptSingle;
	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
	ASSERT(tcp->tcp_tcps == tcps);

	/*
	 * If the caller has the process-wide flag set, then default to MAC
	 * exempt mode.  This allows read-down to unlabeled hosts.
	 */
	if (getpflags(NET_MAC_AWARE, credp) != 0)
		connp->conn_mac_mode = CONN_MAC_AWARE;

	connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);

	if (issocket)
		tcp->tcp_issocket = 1;

	connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
	connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
	if (tcps->tcps_snd_lowat_fraction != 0) {
		connp->conn_sndlowat = connp->conn_sndbuf /
		    tcps->tcps_snd_lowat_fraction;
	} else {
		connp->conn_sndlowat = tcps->tcps_xmit_lowat;
	}
	connp->conn_so_type = SOCK_STREAM;
	connp->conn_wroff = connp->conn_ht_iphc_allocated +
	    tcps->tcps_wroff_xtra;

	SOCK_CONNID_INIT(tcp->tcp_connid);
	/* DTrace ignores this - it isn't a tcp:::state-change */
	tcp->tcp_state = TCPS_IDLE;
	tcp_init_values(tcp, NULL);
	return (connp);
}
static int
tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
    boolean_t isv6)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	int		err;
	vmem_t		*minor_arena = NULL;
	dev_t		conn_dev;
	boolean_t	issocket;

	if (q->q_ptr != NULL)
		return (0);

	if (sflag == MODOPEN)
		return (EINVAL);

	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
		minor_arena = ip_minor_arena_la;
	} else {
		/*
		 * Either minor numbers in the large arena were exhausted
		 * or a non socket application is doing the open.
		 * Try to allocate from the small arena.
		 */
		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0)
			return (EBUSY);
		minor_arena = ip_minor_arena_sa;
	}

	ASSERT(minor_arena != NULL);

	*devp = makedevice(getmajor(*devp), (minor_t)conn_dev);

	if (flag & SO_FALLBACK) {
		/*
		 * Non streams socket needs a stream to fallback to
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_fallback_sock_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	} else if (flag & SO_ACCEPTOR) {
		q->q_qinfo = &tcp_acceptor_rinit;
		/*
		 * the conn_dev and minor_arena will be subsequently used by
		 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
		 * the minor device number for this connection from the q_ptr.
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_acceptor_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	}

	issocket = flag & SO_SOCKSTR;
	connp = tcp_create_common(credp, isv6, issocket, &err);

	if (connp == NULL) {
		inet_minor_free(minor_arena, conn_dev);
		q->q_ptr = WR(q)->q_ptr = NULL;
		return (err);
	}

	connp->conn_rq = q;
	connp->conn_wq = WR(q);
	q->q_ptr = WR(q)->q_ptr = connp;

	connp->conn_dev = conn_dev;
	connp->conn_minor_arena = minor_arena;

	ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
	ASSERT(WR(q)->q_qinfo == &tcp_winit);

	tcp = connp->conn_tcp;

	if (issocket) {
		WR(q)->q_qinfo = &tcp_sock_winit;
	} else {
#ifdef	_ILP32
		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
#else
		tcp->tcp_acceptor_id = conn_dev;
#endif	/* _ILP32 */
		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
	}

	/*
	 * Put the ref for TCP. Ref for IP was already put
	 * by ipcl_conn_create. Also Make the conn_t globally
	 * visible to walkers
	 */
	mutex_enter(&connp->conn_lock);
	CONN_INC_REF_LOCKED(connp);
	ASSERT(connp->conn_ref == 2);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);

	qprocson(q);
	return (0);
}
/*
 * Build/update the tcp header template (in conn_ht_iphc) based on
 * conn_xmit_ipp. The headers include ip6_t, any extension
 * headers, and the maximum size tcp header (to avoid reallocation
 * on the fly for additional tcp options).
 *
 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
 * Returns failure if can't allocate memory.
 */
int
tcp_build_hdrs(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	char		buf[TCP_MAX_HDR_LENGTH];
	uint_t		buflen;
	uint_t		ulplen = TCP_MIN_HEADER_LENGTH;
	uint_t		extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
	tcpha_t		*tcpha;
	uint32_t	cksum;
	int		error;

	/*
	 * We might be called after the connection is set up, and we might
	 * have TS options already in the TCP header. Thus we save any
	 * existing tcp header.
	 */
	buflen = connp->conn_ht_ulp_len;
	if (buflen != 0) {
		bcopy(connp->conn_ht_ulp, buf, buflen);
		extralen -= buflen - ulplen;
		ulplen = buflen;
	}

	/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
	mutex_enter(&connp->conn_lock);
	error = conn_build_hdr_template(connp, ulplen, extralen,
	    &connp->conn_laddr_v6, &connp->conn_faddr_v6,
	    connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	/*
	 * Any routing header/option has been massaged. The checksum difference
	 * is stored in conn_sum for later use.
	 */
	tcpha = (tcpha_t *)connp->conn_ht_ulp;
	tcp->tcp_tcpha = tcpha;

	/* restore any old tcp header */
	if (buflen != 0) {
		bcopy(buf, connp->conn_ht_ulp, buflen);
	} else {
		tcpha->tha_offset_and_reserved = (5 << 4);
		tcpha->tha_lport = connp->conn_lport;
		tcpha->tha_fport = connp->conn_fport;
	}

	/*
	 * IP wants our header length in the checksum field to
	 * allow it to perform a single pseudo-header+checksum
	 * calculation on behalf of TCP.
	 * Include the adjustment for a source route once IP_OPTIONS is set.
	 */
	cksum = sizeof (tcpha_t) + connp->conn_sum;
	cksum = (cksum >> 16) + (cksum & 0xFFFF);
	ASSERT(cksum < 0x10000);
	tcpha->tha_sum = htons(cksum);

	if (connp->conn_ipversion == IPV4_VERSION)
		tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
	else
		tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;

	if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
	    connp->conn_wroff) {
		connp->conn_wroff = connp->conn_ht_iphc_allocated +
		    tcps->tcps_wroff_xtra;
		(void) proto_set_tx_wroff(connp->conn_rq, connp,
		    connp->conn_wroff);
	}
	return (0);
}
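/*
 * Illustrative example of the checksum fold above: for a plain TCP header
 * with no source route, conn_sum is 0 and cksum starts at
 * sizeof (tcpha_t) = 20.  Folding the carry, (20 >> 16) + (20 & 0xFFFF) is
 * still 20, so tha_sum simply carries the TCP header length for IP's
 * pseudo-header computation.  Only when conn_sum is large enough to
 * overflow 16 bits does the fold actually change the stored value.
 */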
/*
 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
 * We do not allow the receive window to shrink.  After setting rwnd,
 * set the flow control hiwat of the stream.
 *
 * This function is called in 2 cases:
 *
 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
 *    connection (passive open) and in tcp_input_data() for active connect.
 *    This is called after tcp_mss_set() when the desired MSS value is known.
 *    This makes sure that our window size is a multiple of the other side's
 *    MSS.
 * 2) Handling SO_RCVBUF option.
 *
 * It is ASSUMED that the requested size is a multiple of the current MSS.
 *
 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
 * user requests so.
 */
int
tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
{
	uint32_t	mss = tcp->tcp_mss;
	uint32_t	old_max_rwnd;
	uint32_t	max_transmittable_rwnd;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Insist on a receive window that is at least
	 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
	 * funny TCP interactions of Nagle algorithm, SWS avoidance
	 * and delayed acknowledgement.
	 */
	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);

	if (tcp->tcp_fused) {
		size_t sth_hiwat;
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		ASSERT(peer_tcp != NULL);
		sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
		if (!tcp_detached) {
			(void) proto_set_rx_hiwat(connp->conn_rq, connp,
			    sth_hiwat);
			tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
		}

		/* Caller could have changed tcp_rwnd; update tha_win */
		if (tcp->tcp_tcpha != NULL) {
			tcp->tcp_tcpha->tha_win =
			    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
		}
		if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
			tcp->tcp_cwnd_max = rwnd;

		/*
		 * In the fusion case, the maxpsz stream head value of
		 * our peer is set according to its send buffer size
		 * and our receive buffer size; since the latter may
		 * have changed we need to update the peer's maxpsz.
		 */
		(void) tcp_maxpsz_set(peer_tcp, B_TRUE);
		return (sth_hiwat);
	}

	if (tcp_detached)
		old_max_rwnd = tcp->tcp_rwnd;
	else
		old_max_rwnd = connp->conn_rcvbuf;

	/*
	 * If window size info has already been exchanged, TCP should not
	 * shrink the window.  Shrinking window is doable if done carefully.
	 * We may add that support later.  But so far there is not a real
	 * need to do that.
	 */
	if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
		/* MSS may have changed, do a round up again. */
		rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
	}

	/*
	 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
	 * can be applied even before the window scale option is decided.
	 */
	max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_transmittable_rwnd) {
		rwnd = max_transmittable_rwnd -
		    (max_transmittable_rwnd % mss);
		if (rwnd < mss)
			rwnd = max_transmittable_rwnd;
		/*
		 * If we're over the limit we may have to back down tcp_rwnd.
		 * The increment below won't work for us. So we set all three
		 * here and the increment below will have no effect.
		 */
		tcp->tcp_rwnd = old_max_rwnd = rwnd;
	}
	if (tcp->tcp_localnet) {
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
	} else {
		/*
		 * For a remote host on a different subnet (through a router),
		 * we ack every other packet to be conforming to RFC1122.
		 * tcp_deferred_acks_max is default to 2.
		 */
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
	}
	if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
		tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
	else
		tcp->tcp_rack_cur_max = 0;
	/*
	 * Increment the current rwnd by the amount the maximum grew (we
	 * can not overwrite it since we might be in the middle of a
	 * connection.)
	 */
	tcp->tcp_rwnd += rwnd - old_max_rwnd;
	connp->conn_rcvbuf = rwnd;

	/* Are we already connected? */
	if (tcp->tcp_tcpha != NULL) {
		tcp->tcp_tcpha->tha_win =
		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	}

	if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
		tcp->tcp_cwnd_max = rwnd;

	if (tcp_detached)
		return (rwnd);

	tcp_set_recv_threshold(tcp, rwnd >> 3);

	(void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
	return (rwnd);
}
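/*
 * Illustrative example of the clamping above: while tcp_rcv_ws is still
 * TCP_MAX_WINSHIFT (14), max_transmittable_rwnd is 65535 << 14 (about 1 GB)
 * and effectively never clamps.  Once the handshake fixes tcp_rcv_ws at,
 * say, 0, max_transmittable_rwnd drops to 65535; a requested rwnd of 1 MB
 * would then be cut back to the largest MSS multiple that still fits in
 * the unscaled 16-bit window field.
 */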
int
tcp_do_unbind(conn_t *connp)
{
	tcp_t	*tcp = connp->conn_tcp;
	int32_t	oldstate;

	switch (tcp->tcp_state) {
	case TCPS_BOUND:
	case TCPS_LISTEN:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * Need to clean up all the eagers since after the unbind, segments
	 * will no longer be delivered to this listener stream.
	 */
	mutex_enter(&tcp->tcp_eager_lock);
	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
		tcp_eager_cleanup(tcp, 0);
	}
	mutex_exit(&tcp->tcp_eager_lock);

	/* Clean up the listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);
	connp->conn_laddr_v6 = ipv6_all_zeros;
	connp->conn_saddr_v6 = ipv6_all_zeros;
	tcp_bind_hash_remove(tcp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_IDLE;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	bzero(&connp->conn_ports, sizeof (connp->conn_ports));

	return (0);
}
/*
 * Collect protocol properties to send to the upper handle.
 */
void
tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
{
	conn_t *connp = tcp->tcp_connp;

	sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
	sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);

	sopp->sopp_rxhiwat = tcp->tcp_fused ?
	    tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
	    connp->conn_rcvbuf;
	/*
	 * Determine what write offset value to use depending on SACK and
	 * whether the endpoint is fused or not.
	 */
	if (tcp->tcp_fused) {
		ASSERT(tcp->tcp_loopback);
		ASSERT(tcp->tcp_loopback_peer != NULL);
		/*
		 * For fused tcp loopback, set the stream head's write
		 * offset value to zero since we won't be needing any room
		 * for TCP/IP headers.  This would also improve performance
		 * since it would reduce the amount of work done by kmem.
		 * Non-fused tcp loopback case is handled separately below.
		 */
		sopp->sopp_wroff = 0;
		/*
		 * Update the peer's transmit parameters according to
		 * our recently calculated high water mark value.
		 */
		(void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
	} else if (tcp->tcp_snd_sack_ok) {
		sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	} else {
		sopp->sopp_wroff = connp->conn_ht_iphc_len +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	}

	if (tcp->tcp_loopback) {
		sopp->sopp_flags |= SOCKOPT_LOOPBACK;
		sopp->sopp_loopback = B_TRUE;
	}
}
/*
 * Check the usability of ZEROCOPY. It's instead checking the flag set by IP.
 */
boolean_t
tcp_zcopy_check(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	boolean_t	zc_enabled = B_FALSE;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (do_tcpzcopy == 2)
		zc_enabled = B_TRUE;
	else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
		zc_enabled = B_TRUE;

	tcp->tcp_snd_zcopy_on = zc_enabled;
	if (!TCP_IS_DETACHED(tcp)) {
		if (zc_enabled) {
			ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		} else {
			ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	}
	return (zc_enabled);
}
/*
 * Backoff from a zero-copy message by copying data to a new allocated
 * message and freeing the original desballoca'ed segmapped message.
 *
 * This function is called by following two callers:
 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
 *    the original desballoca'ed message and notify sockfs. This is in re-
 *    transmit state.
 * 2. tcp_output: fix_xmitlist is set to B_FALSE. Flag STRUIO_ZCNOTIFY needs
 *    to be copied to new message.
 */
mblk_t *
tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
{
	mblk_t		*nbp;
	mblk_t		*head = NULL;
	mblk_t		*tail = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(bp != NULL);
	while (bp != NULL) {
		if (IS_VMLOANED_MBLK(bp)) {
			TCP_STAT(tcps, tcp_zcopy_backoff);
			if ((nbp = copyb(bp)) == NULL) {
				tcp->tcp_xmit_zc_clean = B_FALSE;
				if (tail != NULL)
					tail->b_cont = bp;
				return ((head == NULL) ? bp : head);
			}

			if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
				if (fix_xmitlist)
					tcp_zcopy_notify(tcp);
				else
					nbp->b_datap->db_struioflag |=
					    STRUIO_ZCNOTIFY;
			}
			nbp->b_cont = bp->b_cont;

			/*
			 * Copy saved information and adjust tcp_xmit_tail
			 * if needed.
			 */
			if (fix_xmitlist) {
				nbp->b_prev = bp->b_prev;
				nbp->b_next = bp->b_next;

				if (tcp->tcp_xmit_tail == bp)
					tcp->tcp_xmit_tail = nbp;
			}

			/* Free the original message. */
			bp->b_prev = NULL;
			bp->b_next = NULL;
			freeb(bp);

			bp = nbp;
		}

		if (head == NULL)
			head = bp;
		if (tail == NULL) {
			tail = bp;
		} else {
			tail->b_cont = bp;
			tail = bp;
		}

		/* Move forward. */
		bp = bp->b_cont;
	}

	if (fix_xmitlist) {
		tcp->tcp_xmit_last = tail;
		tcp->tcp_xmit_zc_clean = B_TRUE;
	}

	return (head);
}
void
tcp_zcopy_notify(tcp_t *tcp)
{
	struct stdata	*stp;
	conn_t		*connp;

	if (tcp->tcp_detached)
		return;
	connp = tcp->tcp_connp;
	if (IPCL_IS_NONSTR(connp)) {
		(*connp->conn_upcalls->su_zcopy_notify)
		    (connp->conn_upper_handle);
		return;
	}
	stp = STREAM(connp->conn_rq);
	mutex_enter(&stp->sd_lock);
	stp->sd_flag |= STZCNOTIFY;
	cv_broadcast(&stp->sd_zcopy_wait);
	mutex_exit(&stp->sd_lock);
}
/*
 * Update the TCP connection according to change of LSO capability.
 */
static void
tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
{
	/*
	 * We check against IPv4 header length to preserve the old behavior
	 * of only enabling LSO when there are no IP options.
	 * But this restriction might not be necessary at all. Before removing
	 * it, need to verify how LSO is handled for source routing case, with
	 * which IP does software checksum.
	 *
	 * For IPv6, whenever any extension header is needed, LSO is
	 * suppressed.
	 */
	if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
		return;

	/*
	 * Either the LSO capability newly became usable, or it has changed.
	 */
	if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
		ill_lso_capab_t	*lsoc = &ixa->ixa_lso_capab;

		ASSERT(lsoc->ill_lso_max > 0);
		tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);

		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be enabled, notify the STREAM header with larger
		 * data block.
		 */
		tcp->tcp_maxpsz_multiplier = 0;

		tcp->tcp_lso = B_TRUE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
	} else { /* LSO capability is not usable any more. */
		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be disabled, notify the STREAM header with smaller
		 * data block. And need to restore fragsize to PMTU.
		 */
		tcp->tcp_maxpsz_multiplier =
		    tcp->tcp_tcps->tcps_maxpsz_multiplier;
		ixa->ixa_fragsize = ixa->ixa_pmtu;
		tcp->tcp_lso = B_FALSE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
	}

	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
/*
 * Update the TCP connection according to change of ZEROCOPY capability.
 */
static void
tcp_update_zcopy(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_snd_zcopy_on) {
		tcp->tcp_snd_zcopy_on = B_FALSE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	} else {
		tcp->tcp_snd_zcopy_on = B_TRUE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		}
	}
}
3382 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
3383 * so it's safe to update the TCP connection.
3387 tcp_notify(void *arg
, ip_xmit_attr_t
*ixa
, ixa_notify_type_t ntype
,
3388 ixa_notify_arg_t narg
)
3390 tcp_t
*tcp
= (tcp_t
*)arg
;
3391 conn_t
*connp
= tcp
->tcp_connp
;
3395 tcp_update_lso(tcp
, connp
->conn_ixa
);
3398 tcp_update_pmtu(tcp
, B_FALSE
);
3401 tcp_update_zcopy(tcp
);
/*
 * The TCP write service routine should never be called...
 */
static void
tcp_wsrv(queue_t *q)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	TCP_STAT(tcps, tcp_wsrv_called);
}
/*
 * Hash list lookup routine for tcp_t structures.
 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
 */
tcp_t *
tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
{
	tf_t	*tf;
	tcp_t	*tcp;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
	mutex_enter(&tf->tf_lock);
	for (tcp = tf->tf_tcp; tcp != NULL;
	    tcp = tcp->tcp_acceptor_hash) {
		if (tcp->tcp_acceptor_id == id) {
			CONN_INC_REF(tcp->tcp_connp);
			mutex_exit(&tf->tf_lock);
			return (tcp);
		}
	}
	mutex_exit(&tf->tf_lock);
	return (NULL);
}
/*
 * Hash list insertion routine for tcp_t structures.
 */
void
tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
{
	tf_t	*tf;
	tcp_t	**tcpp;
	tcp_t	*tcpnext;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];

	if (tcp->tcp_ptpahn != NULL)
		tcp_acceptor_hash_remove(tcp);
	tcpp = &tf->tf_tcp;
	mutex_enter(&tf->tf_lock);
	tcpnext = tcpp[0];
	if (tcpnext)
		tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
	tcp->tcp_acceptor_hash = tcpnext;
	tcp->tcp_ptpahn = tcpp;
	tcpp[0] = tcp;
	tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */
	mutex_exit(&tf->tf_lock);
}
/*
 * Hash list removal routine for tcp_t structures.
 */
void
tcp_acceptor_hash_remove(tcp_t *tcp)
{
	tcp_t	*tcpnext;
	kmutex_t *lockp;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
	 */
	lockp = tcp->tcp_acceptor_lockp;

	if (tcp->tcp_ptpahn == NULL)
		return;

	ASSERT(lockp != NULL);
	mutex_enter(lockp);
	if (tcp->tcp_ptpahn) {
		tcpnext = tcp->tcp_acceptor_hash;
		if (tcpnext) {
			tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
			tcp->tcp_acceptor_hash = NULL;
		}
		*tcp->tcp_ptpahn = tcpnext;
		tcp->tcp_ptpahn = NULL;
	}
	mutex_exit(lockp);
	tcp->tcp_acceptor_lockp = NULL;
}
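/*
 * A minimal userland sketch of the tcp_ptpahn pattern used above: each
 * element stores a pointer to whatever pointer points at it (the previous
 * element's next field, or the bucket head), so removal needs neither the
 * list head nor a backward scan.  Illustrative only; not compiled.
 */
#if 0
typedef struct node {
	struct node *next;
	struct node **ptpn;	/* points at whatever points at us */
} node_t;

static void
node_remove(node_t *n)
{
	if (n->next != NULL)
		n->next->ptpn = n->ptpn;
	*n->ptpn = n->next;	/* unlink in O(1) */
	n->ptpn = NULL;
}
#endif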
/*
 * Type three generator adapted from the random() function in 4.4 BSD:
 */

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Type 3 -- x**31 + x**3 + 1 */
#define	DEG_3		31
#define	SEP_3		3


/* Protected by tcp_random_lock */
static int tcp_randtbl[DEG_3 + 1];

static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
static int *tcp_random_rptr = &tcp_randtbl[1];

static int *tcp_random_state = &tcp_randtbl[1];
static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];

kmutex_t tcp_random_lock;
void
tcp_random_init(void)
{
	int i;
	hrtime_t hrt;
	time_t wallclock;
	uint64_t result;

	/*
	 * Use high-res timer and current time for seed.  Gethrtime() returns
	 * a longlong, which may contain resolution down to nanoseconds.
	 * The current time will either be a 32-bit or a 64-bit quantity.
	 * XOR the two together in a 64-bit result variable.
	 * Convert the result to a 32-bit value by multiplying the high-order
	 * 32-bits by the low-order 32-bits.
	 */

	hrt = gethrtime();
	(void) drv_getparm(TIME, &wallclock);
	result = (uint64_t)wallclock ^ (uint64_t)hrt;
	mutex_enter(&tcp_random_lock);
	tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
	    (result & 0xffffffff);

	for (i = 1; i < DEG_3; i++)
		tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
		    + 12345;
	tcp_random_fptr = &tcp_random_state[SEP_3];
	tcp_random_rptr = &tcp_random_state[0];
	mutex_exit(&tcp_random_lock);
	for (i = 0; i < 10 * DEG_3; i++)
		(void) tcp_random();
}
/*
 * tcp_random: Return a random number in the range [1 - (128K + 1)].
 * This range is selected to be approximately centered on TCP_ISS / 2,
 * and easy to compute. We get this value by generating a 32-bit random
 * number, selecting out the high-order 17 bits, and then adding one so
 * that we never return zero.
 */
int
tcp_random(void)
{
	int i;

	mutex_enter(&tcp_random_lock);
	*tcp_random_fptr += *tcp_random_rptr;

	/*
	 * The high-order bits are more random than the low-order bits,
	 * so we select out the high-order 17 bits and add one so that
	 * we never return zero.
	 */
	i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
	if (++tcp_random_fptr >= tcp_random_end_ptr) {
		tcp_random_fptr = tcp_random_state;
		tcp_random_rptr++;
	} else if (++tcp_random_rptr >= tcp_random_end_ptr)
		tcp_random_rptr = tcp_random_state;

	mutex_exit(&tcp_random_lock);
	return (i);
}
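/*
 * For illustration: each call adds one lagged state word into another (the
 * additive-feedback form of the x**31 + x**3 + 1 trinomial above), then maps
 * the sum into the documented range.  Since ((x >> 15) & 0x1ffff) is at most
 * 0x1ffff (131071), the result after adding one lies in [1, 131072], i.e.
 * [1, 128K], and can never be zero.
 */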
/*
 * Split this function out so that if the secret changes, I'm okay.
 *
 * Initialize the tcp_iss_cookie and tcp_iss_key.
 */

#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */

void
tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
{
	struct {
		int32_t current_time;
		uint32_t randnum;
		uint16_t pad;
		uint8_t ether[ETHERADDRL];
		uint8_t passwd[PASSWD_SIZE];
	} tcp_iss_cookie;
	time_t t;

	/*
	 * Start with the current absolute time.
	 */
	(void) drv_getparm(TIME, &t);
	tcp_iss_cookie.current_time = t;

	/*
	 * XXX - Need a more random number per RFC 1750, not this crap.
	 * OTOH, if what follows is pretty random, then I'm in better shape.
	 */
	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
	tcp_iss_cookie.pad = 0x365c;	/* Picked from HMAC pad values. */

	/*
	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
	 * as a good template.
	 */
	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));

	/*
	 * The pass-phrase.  Normally this is supplied by user-called NDD.
	 */
	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));

	/*
	 * See 4010593 if this section becomes a problem again,
	 * but the local ethernet address is useful here.
	 */
	(void) localetheraddr(NULL,
	    (struct ether_addr *)&tcp_iss_cookie.ether);

	/*
	 * Hash 'em all together.  The MD5Final is called per-connection.
	 */
	mutex_enter(&tcps->tcps_iss_key_lock);
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
	    sizeof (tcp_iss_cookie));
	mutex_exit(&tcps->tcps_iss_key_lock);
}
/*
 * Called by IP when IP is loaded into the kernel
 */
void
tcp_ddi_g_init(void)
{
	tcp_timercache = kmem_cache_create("tcp_timercache",
	    sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
	    sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Initialize the random number generator */
	tcp_random_init();

	/* A single callback independently of how many netstacks we have */
	ip_squeue_init(tcp_squeue_add);

	tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);

	tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of tcp_stack_t's.
	 */
	netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
}


#define	INET_NAME	"ip"
/*
 * Initialize the TCP stack instance.
 */
static void *
tcp_stack_init(netstackid_t stackid, netstack_t *ns)
{
	tcp_stack_t	*tcps;
	int		i;
	int		error = 0;
	major_t		major;
	size_t		arrsz;

	tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
	tcps->tcps_netstack = ns;

	/* Initialize locks */
	mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);

	tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
	tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
	tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
	tcps->tcps_min_anonpriv_port = 512;

	tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_BIND_FANOUT_SIZE, KM_SLEEP);
	tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	/* TCP's IPsec code calls the packet dropper. */
	ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");

	arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
	tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
	    KM_SLEEP);
	bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);

	/*
	 * Note: To really walk the device tree you need the devinfo
	 * pointer to your device which is only available after probe/attach.
	 * The following is safe only because it uses ddi_root_node()
	 */
	tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
	    tcp_opt_obj.odb_opt_arr_cnt);

	/*
	 * Initialize RFC 1948 secret values.  This will probably be reset
	 * once by the boot scripts.
	 *
	 * Use NULL name, as the name is caught by the new lockstats.
	 *
	 * Initialize with some random, non-guessable string, like the global
	 * T_INFO_ACK.
	 */

	tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
	    sizeof (tcp_g_t_info_ack), tcps);

	tcps->tcps_kstat = tcp_kstat2_init(stackid);
	tcps->tcps_mibkp = tcp_kstat_init(stackid);

	major = mod_name_to_major(INET_NAME);
	error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
	ASSERT(error == 0);
	tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
	ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
	cv_init(&tcps->tcps_ixa_cleanup_ready_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tcps->tcps_ixa_cleanup_done_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	tcps->tcps_reclaim = B_FALSE;
	tcps->tcps_reclaim_tid = 0;
	tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;

	/*
	 * ncpus is the current number of CPUs, which can be bigger than
	 * boot_ncpus.  But we don't want to use ncpus to allocate all the
	 * tcp_stats_cpu_t at system boot up time since it will be 1.  While
	 * we handle adding CPU in tcp_cpu_update(), it will be slow if
	 * there are many CPUs as we will be adding them 1 by 1.
	 *
	 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
	 * are not freed until the stack is going away.  So there is no need
	 * to grab a lock to access the per CPU tcps_sc[x] pointer.
	 */
	mutex_enter(&cpu_lock);
	tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
	mutex_exit(&cpu_lock);
	tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
	    KM_SLEEP);
	for (i = 0; i < tcps->tcps_sc_cnt; i++) {
		tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
		    KM_SLEEP);
	}

	mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
	    offsetof(tcp_listener_t, tl_link));

	return (tcps);
}
/*
 * Called when the IP module is about to be unloaded.
 */
void
tcp_ddi_g_destroy(void)
{
	tcp_g_kstat_fini(tcp_g_kstat);
	tcp_g_kstat = NULL;
	bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));

	mutex_destroy(&tcp_random_lock);

	kmem_cache_destroy(tcp_timercache);
	kmem_cache_destroy(tcp_notsack_blk_cache);

	netstack_unregister(NS_TCP);
}
/*
 * Free the TCP stack instance.
 */
static void
tcp_stack_fini(netstackid_t stackid, void *arg)
{
	tcp_stack_t *tcps = (tcp_stack_t *)arg;
	int i;

	freeb(tcps->tcps_ixa_cleanup_mp);
	tcps->tcps_ixa_cleanup_mp = NULL;
	cv_destroy(&tcps->tcps_ixa_cleanup_ready_cv);
	cv_destroy(&tcps->tcps_ixa_cleanup_done_cv);
	mutex_destroy(&tcps->tcps_ixa_cleanup_lock);

	/*
	 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to
	 * restart the timer.
	 */
	mutex_enter(&tcps->tcps_reclaim_lock);
	tcps->tcps_reclaim = B_FALSE;
	mutex_exit(&tcps->tcps_reclaim_lock);
	if (tcps->tcps_reclaim_tid != 0)
		(void) untimeout(tcps->tcps_reclaim_tid);
	mutex_destroy(&tcps->tcps_reclaim_lock);

	tcp_listener_conf_cleanup(tcps);

	for (i = 0; i < tcps->tcps_sc_cnt; i++)
		kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
	kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));

	kmem_free(tcps->tcps_propinfo_tbl,
	    tcp_propinfo_count * sizeof (mod_prop_info_t));
	tcps->tcps_propinfo_tbl = NULL;

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
	}

	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) *
	    TCP_BIND_FANOUT_SIZE);
	tcps->tcps_bind_fanout = NULL;

	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE);
	tcps->tcps_acceptor_fanout = NULL;

	mutex_destroy(&tcps->tcps_iss_key_lock);
	mutex_destroy(&tcps->tcps_epriv_port_lock);

	ip_drop_unregister(&tcps->tcps_dropper);

	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
	tcps->tcps_kstat = NULL;

	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
	tcps->tcps_mibkp = NULL;

	ldi_ident_release(tcps->tcps_ldi_ident);
	kmem_free(tcps, sizeof (*tcps));
}

/*
 * Generate ISS, taking into account that NDD changes may happen halfway
 * through.  (If the iss is not zero, set it.)
 */
static void
tcp_iss_init(tcp_t *tcp)
{
	MD5_CTX context;
	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
	uint32_t answer[4];
	tcp_stack_t *tcps = tcp->tcp_tcps;
	conn_t *connp = tcp->tcp_connp;

	tcps->tcps_iss_incr_extra += (tcps->tcps_iss_incr >> 1);
	tcp->tcp_iss = tcps->tcps_iss_incr_extra;
	switch (tcps->tcps_strong_iss) {
	case 2:
		mutex_enter(&tcps->tcps_iss_key_lock);
		context = tcps->tcps_iss_key;
		mutex_exit(&tcps->tcps_iss_key_lock);
		arg.ports = connp->conn_ports;
		arg.src = connp->conn_laddr_v6;
		arg.dst = connp->conn_faddr_v6;
		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
		MD5Final((uchar_t *)answer, &context);
		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
		/*
		 * Now that we've hashed into a unique per-connection
		 * sequence space, add a random increment as in the
		 * strong_iss == 1 case.
		 */
		/* FALLTHRU */
	case 1:
		tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
		break;
	default:
		tcp->tcp_iss += (uint32_t)gethrestime_sec() *
		    tcps->tcps_iss_incr;
		break;
	}
	tcp->tcp_valid_bits = TCP_ISS_VALID;
	tcp->tcp_fss = tcp->tcp_iss - 1;
	tcp->tcp_suna = tcp->tcp_iss;
	tcp->tcp_snxt = tcp->tcp_iss + 1;
	tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
	tcp->tcp_csuna = tcp->tcp_snxt;
}
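
/*
 * Illustrative sketch (not part of the driver): the tcps_strong_iss == 2
 * case above follows the RFC 1948 construction, ISS = M + F(connection
 * ids, secret), with MD5 as F.  A minimal userland analogue, assuming
 * libmd's <md5.h> and a caller-supplied pre-keyed context:
 *
 *	#include <md5.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	uint32_t
 *	rfc1948_iss(MD5_CTX secret_ctx, const void *conn_ids, size_t len,
 *	    uint32_t clock_m)
 *	{
 *		MD5_CTX ctx = secret_ctx;	(copy of the keyed context)
 *		uint32_t digest[4];
 *
 *		MD5Update(&ctx, conn_ids, len);
 *		MD5Final((unsigned char *)digest, &ctx);
 *		return (clock_m + (digest[0] ^ digest[1] ^
 *		    digest[2] ^ digest[3]));
 *	}
 *
 * As in tcp_iss_init(), the digest is folded by XOR and added to a
 * monotonic component M so sequence numbers still advance over time.
 */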

/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q.  Note that the caller may make the
 * decision to call these based on the tcp_t.tcp_flow_stopped value which,
 * when checked outside the q's lock, is only an advisory check.
 */
void
tcp_setqfull(tcp_t *tcp)
{
	tcp_stack_t *tcps = tcp->tcp_tcps;
	conn_t *connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_setqfull(connp, &tcp->tcp_flow_stopped);
	if (tcp->tcp_flow_stopped)
		TCP_STAT(tcps, tcp_flwctl_on);
}

void
tcp_clrqfull(tcp_t *tcp)
{
	conn_t *connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_clrqfull(connp, &tcp->tcp_flow_stopped);
}

static int
tcp_squeue_switch(int val)
{
	int rval = SQ_FILL;

	switch (val) {
	case 1:
		rval = SQ_NODRAIN;
		break;
	case 2:
		rval = SQ_PROCESS;
		break;
	default:
		break;
	}
	return (rval);
}

/*
 * This is called once for each squeue - globally for all stack
 * instances.
 */
void
tcp_squeue_add(squeue_t *sqp)
{
	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
	    sizeof (tcp_squeue_priv_t), KM_SLEEP);

	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
	if (tcp_free_list_max_cnt == 0) {
		int tcp_ncpus = ((boot_max_ncpus == -1) ? max_ncpus :
		    boot_max_ncpus);

		/*
		 * Limit number of entries to 1% of available memory /
		 * tcp_ncpus.
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
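
/*
 * Worked example for the 1% cap above (illustrative numbers): with 8 GB
 * of free memory (freemem * PAGESIZE), 8 CPUs and sizeof (tcp_t) around
 * 2 KB,
 *
 *	tcp_free_list_max_cnt = 8G / (8 * 2K * 100) = ~5242
 *
 * entries, i.e. roughly 1% of memory worth of cached tcp_t's divided
 * evenly among the per-CPU TIME_WAIT free lists.
 */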

/*
 * Return a unix error if the tli error is TSYSERR, otherwise return a
 * negative TLI error.
 */
int
tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
    boolean_t bind_to_req_port_only)
{
	int error;
	tcp_t *tcp = connp->conn_tcp;

	if (tcp->tcp_state >= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_bind: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}

	error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
	if (error != 0)
		return (error);

	ASSERT(tcp->tcp_state == TCPS_BOUND);
	tcp->tcp_conn_req_max = 0;
	return (0);
}
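
/*
 * Sketch of how a caller decodes that convention (hypothetical caller
 * code): zero is success, positive values are UNIX errnos, and negative
 * values are TLI errors that can be mapped back with the existing
 * proto_tlitosyserr():
 *
 *	error = tcp_do_bind(connp, sa, len, cr, B_FALSE);
 *	if (error < 0) {
 *		if (error == -TOUTSTATE)
 *			error = EINVAL;
 *		else
 *			error = proto_tlitosyserr(-error);
 *	}
 */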

/*
 * If the return value from this function is positive, it's a UNIX error.
 * Otherwise, if it's negative, then the absolute value is a TLI error.
 * The TPI routine tcp_tpi_connect() is a wrapper function for this.
 */
int
tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
    cred_t *cr, pid_t pid)
{
	tcp_t *tcp = connp->conn_tcp;
	sin_t *sin = (sin_t *)sa;
	sin6_t *sin6 = (sin6_t *)sa;
	ipaddr_t *dstaddrp;
	in_port_t dstport;
	uint_t srcid;
	int error;
	uint32_t mss;
	mblk_t *syn_mp;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	int32_t oldstate;
	ip_xmit_attr_t *ixa = connp->conn_ixa;

	oldstate = tcp->tcp_state;

	switch (len) {
	default:
		/*
		 * Should never happen
		 */
		return (EINVAL);

	case sizeof (sin_t):
		if (sin->sin_port == 0) {
			return (-TBADADDR);
		}
		if (connp->conn_ipv6_v6only) {
			return (EAFNOSUPPORT);
		}
		break;

	case sizeof (sin6_t):
		sin6 = (sin6_t *)sa;
		if (sin6->sin6_port == 0) {
			return (-TBADADDR);
		}
		break;
	}

	/*
	 * If we're connecting to an IPv4-mapped IPv6 address, we need to
	 * make sure that the conn_ipversion is IPV4_VERSION.  We need to
	 * do this before we call tcp_bindi() so that the port lookup code
	 * will look for ports in the correct port space (IPv4 and IPv6
	 * have separate port spaces).
	 */
	if (connp->conn_family == AF_INET6 &&
	    connp->conn_ipversion == IPV6_VERSION &&
	    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		if (connp->conn_ipv6_v6only)
			return (EADDRNOTAVAIL);

		connp->conn_ipversion = IPV4_VERSION;
	}
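
	/*
	 * For reference, an IPv4-mapped IPv6 address has the form
	 * ::ffff:a.b.c.d, so connecting an AF_INET6 socket to
	 * ::ffff:192.0.2.1 reaches the IPv4 host 192.0.2.1.  The same
	 * test in a userland snippet (hypothetical; needs <arpa/inet.h>
	 * and <assert.h>):
	 *
	 *	struct in6_addr a6;
	 *
	 *	(void) inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);
	 *	assert(IN6_IS_ADDR_V4MAPPED(&a6));	(true: v4 path taken)
	 */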

	switch (tcp->tcp_state) {
	case TCPS_LISTEN:
		/*
		 * Listening sockets are not allowed to issue connect().
		 */
		if (IPCL_IS_NONSTR(connp))
			return (EOPNOTSUPP);
		/* FALLTHRU */
	case TCPS_IDLE:
		/*
		 * We support quick connect, refer to comments in
		 * tcp_connect_*()
		 */
		/* FALLTHRU */
	case TCPS_BOUND:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * We update our cred/cpid based on the caller of connect.
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in the ixa without any refhold */
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	if (connp->conn_family == AF_INET6) {
		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
			    sin6->sin6_port, sin6->sin6_flowinfo,
			    sin6->__sin6_src_id, sin6->sin6_scope_id);
		} else {
			/*
			 * Destination address is a mapped IPv6 address.
			 * The source bound address should be unspecified
			 * or an IPv6 mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &connp->conn_bound_addr_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(
			    &connp->conn_bound_addr_v6)) {
				return (EADDRNOTAVAIL);
			}
			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
			dstport = sin6->sin6_port;
			srcid = sin6->__sin6_src_id;
			error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
			    srcid);
		}
	} else {
		dstaddrp = &sin->sin_addr.s_addr;
		dstport = sin->sin_port;
		srcid = 0;
		error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
	}

	if (error != 0)
		goto connect_failed;

	CL_INET_CONNECT(connp, B_TRUE, error);
	if (error != 0)
		goto connect_failed;

	/* connect succeeded */
	TCPS_BUMP_MIB(tcps, tcpActiveOpens);
	tcp->tcp_active_open = 1;

	/*
	 * tcp_set_destination() does not adjust for TCP/IP header length.
	 */
	mss = tcp->tcp_mss - connp->conn_ht_iphc_len;

	/*
	 * Just make sure our rwnd is at least tcps_recv_hiwat_minmss * MSS
	 * large, and round up to the nearest MSS.
	 *
	 * We do the round up here because we need to get the interface MTU
	 * first before we can do the round up.
	 */
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
	    tcps->tcps_recv_hiwat_minmss * mss);
	connp->conn_rcvbuf = tcp->tcp_rwnd;
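
	/*
	 * Worked example of the round up: with conn_rcvbuf = 65536 and an
	 * effective mss of 1460, MSS_ROUNDUP(65536, 1460) = 45 * 1460 =
	 * 65700, i.e. the window grows to the next MSS multiple rather
	 * than being truncated.  The tcps_recv_hiwat_minmss floor (say
	 * 8 * 1460 = 11680) only kicks in for much smaller receive
	 * buffers.
	 */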

	tcp_set_ws_value(tcp);
	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
		tcp->tcp_snd_ws_ok = B_TRUE;

	/*
	 * Set tcp_snd_ts_ok to true so that tcp_xmit_mp will include the
	 * timestamp option in the SYN segment.
	 */
	if (tcps->tcps_tstamp_always ||
	    (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
		tcp->tcp_snd_ts_ok = B_TRUE;
	}

	/*
	 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
	 * the SACK metric is set.  So here we just check the per stack SACK
	 * permitted param.
	 */
	if (tcps->tcps_sack_permitted == 2) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_TRUE;
	}

	/*
	 * Should we use ECN?  Note that the current default value (SunOS 5.9)
	 * of tcp_ecn_permitted is 1.  The reason for doing this is that there
	 * is equipment out there that will drop ECN enabled IP packets.
	 * Setting it to 1 avoids compatibility problems.
	 */
	if (tcps->tcps_ecn_permitted == 2)
		tcp->tcp_ecn_ok = B_TRUE;

	/* Trace change from BOUND -> SYN_SENT here */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, TCPS_BOUND);

	TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
	syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
	    tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (syn_mp != NULL) {
		/*
		 * We must bump the generation before sending the syn
		 * to ensure that we use the right generation in case
		 * this thread issues a "connected" up call.
		 */
		SOCK_CONNID_BUMP(tcp->tcp_connid);
		/*
		 * DTrace sending the first SYN as a
		 * tcp:::connect-request event.
		 */
		DTRACE_TCP5(connect__request, mblk_t *, NULL,
		    ip_xmit_attr_t *, connp->conn_ixa,
		    void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
		    tcph_t *,
		    &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
		tcp_send_data(tcp, syn_mp);
	}

	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (0);

connect_failed:
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	tcp->tcp_state = oldstate;
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (error);
}

int
tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
    int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
{
	tcp_t *tcp = connp->conn_tcp;
	int error = 0;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	int32_t oldstate;
	sin6_t addr;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (tcp->tcp_state >= TCPS_BOUND) {
		if ((tcp->tcp_state == TCPS_BOUND ||
		    tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
			/*
			 * Handle listen() increasing backlog.
			 * This is more "liberal" than what the TPI spec
			 * requires but is needed to avoid a t_unbind
			 * when handling listen() since the port number
			 * might be "stolen" between the unbind and bind.
			 */
			goto do_listen;
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_listen: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}

	if (sa == NULL) {
		sin_t *sin;
		sin6_t *sin6;

		ASSERT(IPCL_IS_NONSTR(connp));
		/* Do an implicit bind: Request for a generic port. */
		if (connp->conn_family == AF_INET) {
			len = sizeof (sin_t);
			sin = (sin_t *)&addr;
			*sin = sin_null;
			sin->sin_family = AF_INET;
		} else {
			ASSERT(connp->conn_family == AF_INET6);
			len = sizeof (sin6_t);
			sin6 = (sin6_t *)&addr;
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
		}
		sa = (struct sockaddr *)&addr;
	}

	error = tcp_bind_check(connp, sa, len, cr,
	    bind_to_req_port_only);
	if (error != 0)
		return (error);
	/* Fall through and do the fanout insertion */

do_listen:
	ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
	tcp->tcp_conn_req_max = backlog;
	if (tcp->tcp_conn_req_max) {
		if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
		if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;

		/*
		 * If this is a listener, do not reset the eager list
		 * and other state.  Note that we don't check if the
		 * existing eager list meets the new tcp_conn_req_max
		 * requirement.
		 */
		if (tcp->tcp_state != TCPS_LISTEN) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void, NULL,
			    tcp_t *, tcp, void, NULL, int32_t, TCPS_BOUND);
			/* Initialize the chain. Don't need the eager_lock */
			tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
			tcp->tcp_eager_next_drop_q0 = tcp;
			tcp->tcp_eager_prev_drop_q0 = tcp;
			tcp->tcp_second_ctimer_threshold =
			    tcps->tcps_ip_abort_linterval;
		}
	}
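
	/*
	 * The assignments above leave the eager queues as empty circular
	 * lists: an empty list is one whose head points at itself.  The
	 * same idiom in miniature (hypothetical node type, not used here):
	 *
	 *	typedef struct node {
	 *		struct node *next, *prev;
	 *	} node_t;
	 *
	 *	void
	 *	node_list_init(node_t *head)
	 *	{
	 *		head->next = head->prev = head;
	 *	}
	 *
	 *	boolean_t
	 *	node_list_empty(node_t *head)
	 *	{
	 *		return (head->next == head);
	 *	}
	 *
	 * With the listener acting as its own sentinel, eager insertion
	 * and removal need no NULL checks.
	 */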

	/*
	 * We need to make sure that the conn_recv is set to a non-null
	 * value before we insert the conn into the classifier table.
	 * This is to avoid a race with an incoming packet which does an
	 * ipcl_classify().
	 * We initially set it to tcp_input_listener_unbound to try to
	 * pick a good squeue for the listener when the first SYN arrives.
	 * tcp_input_listener_unbound sets it to tcp_input_listener on that
	 * first SYN.
	 */
	connp->conn_recv = tcp_input_listener_unbound;

	/* Insert the listener in the classifier table */
	error = ip_laddr_fanout_insert(connp);
	if (error != 0) {
		/* Undo the bind - release the port number */
		oldstate = tcp->tcp_state;
		tcp->tcp_state = TCPS_IDLE;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, oldstate);
		connp->conn_bound_addr_v6 = ipv6_all_zeros;

		connp->conn_laddr_v6 = ipv6_all_zeros;
		connp->conn_saddr_v6 = ipv6_all_zeros;
		connp->conn_ports = 0;

		if (connp->conn_anon_port) {
			zone_t *zone;

			zone = crgetzone(cr);
			connp->conn_anon_port = B_FALSE;
			(void) tsol_mlp_anon(zone, connp->conn_mlp_type,
			    connp->conn_proto, connp->conn_lport, B_FALSE);
		}
		connp->conn_mlp_type = mlptSingle;

		tcp_bind_hash_remove(tcp);
		return (error);
	}

	/*
	 * If there is a connection limit, allocate and initialize
	 * the counter struct.  Note that since listen can be called
	 * multiple times, the struct may have been already allocated.
	 */
	if (!list_is_empty(&tcps->tcps_listener_conf) &&
	    tcp->tcp_listen_cnt == NULL) {
		tcp_listen_cnt_t *tlc;
		uint32_t ratio;

		ratio = tcp_find_listener_conf(tcps,
		    ntohs(connp->conn_lport));
		if (ratio != 0) {
			uint32_t mem_ratio, tot_buf;

			tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
			    KM_SLEEP);
			/*
			 * Calculate the connection limit based on
			 * the configured ratio and maxusers.  Maxusers
			 * are calculated based on memory size,
			 * ~ 1 user per MB.  Note that the conn_rcvbuf
			 * and conn_sndbuf may change after a
			 * connection is accepted.  So what we have
			 * is only an approximation.
			 */
			if ((tot_buf = connp->conn_rcvbuf +
			    connp->conn_sndbuf) < MB) {
				mem_ratio = MB / tot_buf;
				tlc->tlc_max = maxusers / ratio *
				    mem_ratio;
			} else {
				mem_ratio = tot_buf / MB;
				tlc->tlc_max = maxusers / ratio /
				    mem_ratio;
			}
			/* At least we should allow two connections! */
			if (tlc->tlc_max <= tcp_min_conn_listener)
				tlc->tlc_max = tcp_min_conn_listener;
			tlc->tlc_cnt = 1;
			tlc->tlc_drop = 0;
			tcp->tcp_listen_cnt = tlc;
		}
	}
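
	/*
	 * Worked example for the tlc_max calculation above (illustrative
	 * numbers): on a system where maxusers works out to 2048 (~1 per
	 * MB of memory), with ratio = 2 and conn_rcvbuf + conn_sndbuf =
	 * 256 KB < 1 MB:
	 *
	 *	mem_ratio = MB / tot_buf = 4
	 *	tlc_max   = maxusers / ratio * mem_ratio = 2048 / 2 * 4
	 *		  = 4096
	 *
	 * Smaller per-connection buffers therefore raise the admissible
	 * connection count for the listener, and larger ones lower it.
	 */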