/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 */

/* Copyright (c) 1990 Mentat Inc. */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/sunldi.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/random.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>
#include <inet/common.h>
#include <inet/ip_impl.h>
#include <inet/ip_ndp.h>
#include <inet/proto_set.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/udp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipdrop.h>
#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_rts.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>
#include <sys/callo.h>
/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in tcp_t and conn_t structure
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t. The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time
 * eager is created till the time tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPSEC checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
 * or SQ_NODRAIN). Since the squeues allow a direct function call, caller
 * can pass any tcp function having prototype of edesc_t as argument
 * (different from traditional STREAMs model where packets come in only
 * designated entry points). The list of functions that can be directly
 * called via squeue are listed before the usual function prototype.
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at tcp structure after tcp_close_output() returns which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on write side or by doing a classify on read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. For
 * read side, the classifier itself puts the reference under fanout lock
 * to make sure that tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on application CPU which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * picked up when an ACK comes back.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_input_listener(). But briefly, the squeue is picked by
 * ip_fanout based on the ring or the sender (if loopback).
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
 * reference but tcp_close() always drops IP's reference. So if tcp was
 * not killed, it is sitting in time_wait list with 2 references - 1 for TCP
 * and 1 because it is in classifier's connected hash. This is the condition
 * we use to determine that it's OK to clean up the tcp outside of squeue
 * when time wait expires (check the ref under fanout and conn_lock and
 * if it is 2, remove it from fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know the tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
 * tcp_detached and the STREAM can be let go.
 *
 * Special provisions and fast paths:
 *
 * We make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in acceptor hash since a sockfs listener can never
 * become acceptor and also avoid allocating a tcp_t for acceptor STREAM
 * since eager has already been allocated and the accept now happens
 * on acceptor STREAM. There is a big blob of comment on top of
 * tcp_input_listener explaining the new accept. When socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to old
 * behaviour. Once tcp_issocket is unset, it is never set for the
 * life of that connection.
 *
 * IPsec Notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPSEC policies for new connection. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 */
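/*
 * Illustrative sketch of the protection scheme described above: a caller
 * that already holds a conn_t reference hands work to the connection's
 * squeue roughly as
 *
 *	CONN_INC_REF(connp);
 *	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_input_data, connp,
 *	    ira, SQ_FILL, SQTAG_TCP_INPUT_DATA);
 *
 * mirroring the SQUEUE_ENTER_ONE() calls later in this file; the squeue
 * drops that reference once the callback returns. The flag and tag values
 * shown here are illustrative assumptions, not a fixed requirement.
 */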
/*
 * Values for squeue switch:
 */
int tcp_squeue_wput = 2;	/* /etc/systems */

/*
 * To prevent memory hog, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;
#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	128000

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Size of acceptor hash list. It has to be a power of 2 for hashing.
 */
#define	TCP_ACCEPTOR_FANOUT_SIZE	512
#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#endif	/* _ILP32 */
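/*
 * For example (illustrative): with TCP_ACCEPTOR_FANOUT_SIZE of 512, an
 * acceptor id of 0x12345 lands in bucket ((0x12345 >> 8) & 511) = 0x123
 * under the first definition, or (0x12345 & 511) = 0x145 under the second.
 */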
/*
 * Minimum number of connections which can be created per listener. Used
 * when the listener connection count is in effect.
 */
static uint32_t tcp_min_conn_listener = 2;

uint32_t tcp_early_abort = 30;
/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;
/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
void		tcp_input_data(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy);
/* Prototype for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static int	tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static int	tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo,
		    uint_t srcid, uint32_t scope_id);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_wsrv(queue_t *q);
static void	tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
static void	tcp_update_zcopy(tcp_t *tcp);
static void	tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
		    ixa_notify_arg_t);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);

static int	tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);

static void	tcp_squeue_add(squeue_t *);
struct module_info tcp_rinfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};
/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};
/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};
/* TCP entry point during fallback */
struct qinit tcp_fallback_sock_winit = {
	(pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
};
/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};
/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};
/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries.
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
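/*
 * For example, ROUNDUP32(5) yields 8 and ROUNDUP32(8) stays 8, since the
 * macro rounds its argument up to the next multiple of sizeof (int32_t).
 */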
/* Template for response to info request. */
struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};
/*
 * TCP tunables related declarations. Definitions are in tcp_tunables.c
 */
extern mod_prop_info_t tcp_propinfo_tbl[];
extern int tcp_propinfo_count;

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */
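/*
 * As the name suggests, IS_VMLOANED_MBLK() flags mblks whose data buffer
 * is loaned out for zero-copy (STRUIO_ZC is set in db_struioflag), and
 * do_tcpzcopy above controls whether the zero-copy transmit path is used.
 */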
/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD. Otherwise, the per-connection behavior is
 * determined dynamically during tcp_set_destination(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;
/*
 * If the receive buffer size is changed, this function is called to update
 * the upper socket layer on the new delayed receive wake up threshold.
 */
void
tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
{
	uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;

	if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
		conn_t *connp = tcp->tcp_connp;
		struct sock_proto_props sopp;

		/*
		 * only increase rcvthresh up to default_threshold
		 */
		if (new_rcvthresh > default_threshold)
			new_rcvthresh = default_threshold;

		sopp.sopp_flags = SOCKOPT_RCVTHRESH;
		sopp.sopp_rcvthresh = new_rcvthresh;

		(*connp->conn_upcalls->su_set_proto_props)
		    (connp->conn_upper_handle, &sopp);
	}
}
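/*
 * Note that default_threshold above acts as a cap: the value passed up via
 * SOCKOPT_RCVTHRESH never exceeds one eighth of SOCKET_RECVHIWATER.
 */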
/*
 * Figure out the value of the window scale option. Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
 */
void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)	/* assumed loop step: halve rwnd per shift */
		;
	tcp->tcp_rcv_ws = i;
}
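/*
 * Worked example (illustrative): with a tcp_rwnd of 1 MB the loop above
 * shifts 1048576 down to 32768 in five steps, so the connection ends up
 * advertising a window scale of 5 (32768 << 5 == 1 MB).
 */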
/*
 * Remove cached/latched IPsec references.
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(connp->conn_flags & IPCL_TCPCONN);

	if (connp->conn_latch != NULL) {
		IPLATCH_REFRELE(connp->conn_latch);
		connp->conn_latch = NULL;
	}
	if (connp->conn_latch_in_policy != NULL) {
		IPPOL_REFRELE(connp->conn_latch_in_policy);
		connp->conn_latch_in_policy = NULL;
	}
	if (connp->conn_latch_in_action != NULL) {
		IPACT_REFRELE(connp->conn_latch_in_action);
		connp->conn_latch_in_action = NULL;
	}
	if (connp->conn_policy != NULL) {
		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
		connp->conn_policy = NULL;
	}
}
/*
 * Cleanup before placing on free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	mblk_t		*tcp_rsrv_mp;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);
	ixa_cleanup(connp->conn_ixa);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in global hash list. We
	 * know the walkers can't get to this conn because we
	 * had set CONDEMNED flag earlier and checked reference
	 * under conn_lock so walker won't pick it and when we
	 * do the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_rsrv_mp = tcp->tcp_rsrv_mp;

	if (connp->conn_cred != NULL) {
		crfree(connp->conn_cred);
		connp->conn_cred = NULL;
	}
	ipcl_conn_cleanup(connp);
	connp->conn_flags = IPCL_TCPCONN;

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack
	 * in which case it will cause the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	connp->conn_ixa->ixa_ipst = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;

	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_rsrv_mp = tcp_rsrv_mp;

	tcp->tcp_connp = connp;

	ASSERT(connp->conn_tcp == tcp);
	ASSERT(connp->conn_flags & IPCL_TCPCONN);
	connp->conn_state_flags = CONN_INCIPIENT;
	ASSERT(connp->conn_proto == IPPROTO_TCP);
	ASSERT(connp->conn_ref == 1);
}
/*
 * Adapt to the information, such as rtt and rtt_sd, provided from the
 * DCE and IRE maintained by IP.
 *
 * Checks for multicast and broadcast destination address.
 * Returns zero if ok; an errno on failure.
 *
 * Note that the MSS calculation here is based on the info given in
 * the DCE and IRE. We do not do any calculation based on TCP options. They
 * will be handled in tcp_input_data() when TCP knows which options to use.
 *
 * Note on how TCP gets its parameters for a connection.
 *
 * When a tcp_t structure is allocated, it gets all the default parameters.
 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
 * spipe, rpipe, ... from the route metrics. Route metric overrides the
 * default.
 *
 * An incoming SYN with a multicast or broadcast destination address is dropped
 * in ip_fanout_v4/v6.
 *
 * An incoming SYN with a multicast or broadcast source address is always
 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
 * conn_connect.
 * The same logic in tcp_set_destination also serves to
 * reject an attempt to connect to a broadcast or multicast (destination)
 * address.
 */
int
tcp_set_destination(tcp_t *tcp)
{
	uint32_t	mss;
	uint32_t	mss_max;
	uint32_t	rto;
	int		error;
	uint32_t	flags;
	iulp_t		uinfo;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	flags = IPDF_LSO | IPDF_ZCOPY;
	/*
	 * Make sure we have a dce for the destination to avoid dce_ident
	 * contention for connected sockets.
	 */
	flags |= IPDF_UNIQUE_DCE;

	if (!tcps->tcps_ignore_path_mtu)
		connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;

	/* Use conn_lock to satisfy ASSERT; tcp is already serialized */
	mutex_enter(&connp->conn_lock);
	error = conn_connect(connp, &uinfo, flags);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	error = tcp_build_hdrs(tcp);
	if (error != 0)
		return (error);

	tcp->tcp_localnet = uinfo.iulp_localnet;

	if (uinfo.iulp_rtt != 0) {
		tcp->tcp_rtt_sa = uinfo.iulp_rtt;
		tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
		rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
		    tcps->tcps_rexmit_interval_extra +
		    (tcp->tcp_rtt_sa >> 5);

		TCP_SET_RTO(tcp, rto);
	}
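	/*
	 * Descriptive note: with tcp_rtt_sa and tcp_rtt_sd kept in scaled
	 * form, the rto expression above appears to follow the classic
	 * "srtt + 4 * rttvar" retransmit timeout estimate, padded by the
	 * stack-wide tcps_rexmit_interval_extra and an extra srtt/32 term.
	 */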
	if (uinfo.iulp_ssthresh != 0)
		tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
	else
		tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	if (uinfo.iulp_spipe > 0) {
		connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
		    tcps->tcps_max_buf);
		if (tcps->tcps_snd_lowat_fraction != 0) {
			connp->conn_sndlowat = connp->conn_sndbuf /
			    tcps->tcps_snd_lowat_fraction;
		}
		(void) tcp_maxpsz_set(tcp, B_TRUE);
	}
	/*
	 * Note that up till now, acceptor always inherits receive
	 * window from the listener. But if there is a metrics
	 * associated with a host, we should use that instead of
	 * inheriting it from listener. Thus we need to pass this
	 * info back to the caller.
	 */
	if (uinfo.iulp_rpipe > 0) {
		tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
		    tcps->tcps_max_buf);
	}

	if (uinfo.iulp_rtomax > 0) {
		tcp->tcp_second_timer_threshold =
		    uinfo.iulp_rtomax;
	}

	/*
	 * Use the metric option settings, iulp_tstamp_ok and
	 * iulp_wscale_ok, only for active open. What this means
	 * is that if the other side uses timestamp or window
	 * scale option, TCP will also use those options. That
	 * is for passive open. If the application sets a
	 * large window, window scale is enabled regardless of
	 * the value in iulp_wscale_ok. This is the behavior
	 * since 2.6. So we keep it.
	 * The only case left in passive open processing is the
	 * check for SACK.
	 * For ECN, it should probably be like SACK. But the
	 * current value is binary, so we treat it like the other
	 * cases. The metric only controls active open. For passive
	 * open, the ndd param, tcp_ecn_permitted, controls the
	 * behaviour.
	 */
	if (!tcp_detached) {
		/*
		 * The if check means that the following can only
		 * be turned on by the metrics only IRE, but not off.
		 */
		if (uinfo.iulp_tstamp_ok)
			tcp->tcp_snd_ts_ok = B_TRUE;
		if (uinfo.iulp_wscale_ok)
			tcp->tcp_snd_ws_ok = B_TRUE;
		if (uinfo.iulp_sack == 2)
			tcp->tcp_snd_sack_ok = B_TRUE;
		if (uinfo.iulp_ecn_ok)
			tcp->tcp_ecn_ok = B_TRUE;
	} else {
		/*
		 * As above, the if check means that SACK can only be
		 * turned on by the metric only IRE.
		 */
		if (uinfo.iulp_sack > 0) {
			tcp->tcp_snd_sack_ok = B_TRUE;
		}
	}

	/*
	 * XXX Note that currently, iulp_mtu can be as small as 68
	 * because of PMTUd. So tcp_mss may go to negative if combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(uinfo.iulp_mtu != 0);
	mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;

	/* Sanity check for MSS value. */
	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Update the tcp connection with LSO capability.
	 */
	tcp_update_lso(tcp, connp->conn_ixa);

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
	 */
	tcp_iss_init(tcp);

	tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);

	/*
	 * Make sure that conn is not marked incipient
	 * for incoming connections. A blind
	 * removal of incipient flag is cheaper than
	 * check and removal.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);
	return (0);
}
/*
 * tcp_clean_death / tcp_close_detached must not be called more than once
 * on a tcp. Thus every function that potentially calls tcp_clean_death
 * must check for the tcp state before calling tcp_clean_death.
 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
 * tcp_timer_handler, all check for the tcp state.
 */
void
tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy)
{
	tcp_t *tcp = ((conn_t *)arg)->conn_tcp;

	freemsg(mp);
	if (tcp->tcp_state > TCPS_BOUND)
		(void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
}
/*
 * We are dying for some reason. Try to do it gracefully. (May be called
 * as writer.)
 *
 * Return -1 if the structure was not cleaned up (if the cleanup had to be
 * done by a service procedure).
 * TBD - Should the return value distinguish between the tcp_t being
 * freed and it being reinitialized?
 */
int
tcp_clean_death(tcp_t *tcp, int err)
{
	mblk_t	*mp;
	queue_t	*q;
	conn_t	*connp = tcp->tcp_connp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
	}

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (TCP_IS_DETACHED(tcp)) {
		if (tcp->tcp_hard_binding) {
			/*
			 * Its an eager that we are dealing with. We close the
			 * eager but in case a conn_ind has already gone to the
			 * listener, let tcp_accept_finish() send a discon_ind
			 * to the listener and drop the last reference. If the
			 * listener doesn't even know about the eager i.e. the
			 * conn_ind hasn't gone up, blow away the eager and drop
			 * the last reference as well. If the conn_ind has gone
			 * up, state should be BOUND. tcp_accept_finish
			 * will figure out that the connection has received a
			 * RST and will send a DISCON_IND to the application.
			 */
			tcp_closei_local(tcp);
			if (!tcp->tcp_tconnind_started) {
				CONN_DEC_REF(connp);
			} else {
				tcp->tcp_state = TCPS_BOUND;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_CLOSED);
			}
		} else {
			tcp_close_detached(tcp);
		}
		return (0);
	}

	TCP_STAT(tcps, tcp_clean_death_nondetached);

	/*
	 * The connection is dead. Decrement listener connection counter if
	 * necessary.
	 */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	/*
	 * When a connection is moved to TIME_WAIT state, the connection
	 * counter is already decremented. So no need to decrement here
	 * again. See SET_TIME_WAIT() macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	q = connp->conn_rq;

	/* Trash all inbound data */
	if (!IPCL_IS_NONSTR(connp)) {
		ASSERT(q != NULL);
		flushq(q, FLUSHALL);
	}

	/*
	 * If we are at least part way open and there is error
	 * (err==0 implies no error)
	 * notify our client by a T_DISCON_IND.
	 */
	if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
		    !TCP_IS_SOCKET(tcp)) {
			/*
			 * Send M_FLUSH according to TPI. Because sockets will
			 * (and must) ignore FLUSHR we do that only for TPI
			 * endpoints and sockets in STREAMS mode.
			 */
			(void) putnextctl1(q, M_FLUSH, FLUSHR);
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_clean_death: discon err %d", err);
		}
		if (IPCL_IS_NONSTR(connp)) {
			/* Direct socket, use upcall */
			(*connp->conn_upcalls->su_disconnected)(
			    connp->conn_upper_handle, tcp->tcp_connid, err);
		} else {
			mp = mi_tpi_discon_ind(NULL, err, 0);
			if (mp != NULL) {
				putnext(q, mp);
			} else {
				if (connp->conn_debug) {
					(void) strlog(TCP_MOD_ID, 0, 1,
					    SL_ERROR|SL_TRACE,
					    "tcp_clean_death, sending M_ERROR");
				}
				(void) putnextctl1(q, M_ERROR, EPROTO);
			}
		}
		if (tcp->tcp_state <= TCPS_SYN_RCVD) {
			/* SYN_SENT or SYN_RCVD */
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
		} else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
			/* ESTABLISHED or CLOSE_WAIT */
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
		}
	}

	/*
	 * ESTABLISHED non-STREAMS eagers are not 'detached' because
	 * an upper handle is obtained when the SYN-ACK comes in. So it
	 * should receive the 'disconnected' upcall, but tcp_reinit should
	 * not be called since this is an eager.
	 */
	if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
		tcp_closei_local(tcp);
		tcp->tcp_state = TCPS_BOUND;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, TCPS_CLOSED);
		return (0);
	}

	tcp_reinit(tcp);
	if (IPCL_IS_NONSTR(connp))
		(void) tcp_do_unbind(connp);

	return (-1);
}
/*
 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
 * to expire, stop the wait and finish the close.
 */
void
tcp_stop_lingering(tcp_t *tcp)
{
	clock_t	delta = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcp->tcp_linger_tid = 0;
	if (tcp->tcp_state > TCPS_LISTEN) {
		tcp_acceptor_hash_remove(tcp);
		mutex_enter(&tcp->tcp_non_sq_lock);
		if (tcp->tcp_flow_stopped) {
			tcp_clrqfull(tcp);
		}
		mutex_exit(&tcp->tcp_non_sq_lock);

		if (tcp->tcp_timer_tid != 0) {
			delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
			tcp->tcp_timer_tid = 0;
		}
		/*
		 * Need to cancel those timers which will not be used when
		 * TCP is detached. This has to be done before the conn_wq
		 * is cleared.
		 */
		tcp_timers_stop(tcp);

		tcp->tcp_detached = B_TRUE;
		connp->conn_rq = NULL;
		connp->conn_wq = NULL;

		if (tcp->tcp_state == TCPS_TIME_WAIT) {
			tcp_time_wait_append(tcp);
			TCP_DBGSTAT(tcps, tcp_detach_time_wait);
			goto finish;
		}

		/*
		 * If delta is zero the timer event wasn't executed and was
		 * successfully canceled. In this case we need to restart it
		 * with the minimal delta possible.
		 */
		if (delta >= 0) {
			tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
			    delta ? delta : 1);
		}
	} else {
		tcp_closei_local(tcp);
		CONN_DEC_REF(connp);
	}
finish:
	tcp->tcp_detached = B_TRUE;
	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	/* Signal closing thread that it can complete close */
	mutex_enter(&tcp->tcp_closelock);
	tcp->tcp_closed = 1;
	cv_signal(&tcp->tcp_closecv);
	mutex_exit(&tcp->tcp_closelock);

	/* If we have an upper handle (socket), release it */
	if (IPCL_IS_NONSTR(connp)) {
		ASSERT(connp->conn_upper_handle != NULL);
		(*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
void
tcp_close_common(conn_t *connp, int flags)
{
	tcp_t		*tcp = connp->conn_tcp;
	mblk_t		*mp = &tcp->tcp_closemp;
	boolean_t	conn_ioctl_cleanup_reqd = B_FALSE;
	mblk_t		*bp;

	ASSERT(connp->conn_ref >= 2);

	/*
	 * Mark the conn as closing. ipsq_pending_mp_add will not
	 * add any mp to the pending mp list, after this conn has
	 * started closing.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CLOSING;
	if (connp->conn_oper_pending_ill != NULL)
		conn_ioctl_cleanup_reqd = B_TRUE;
	CONN_INC_REF_LOCKED(connp);
	mutex_exit(&connp->conn_lock);
	tcp->tcp_closeflags = (uint8_t)flags;
	ASSERT(connp->conn_ref >= 3);

	/*
	 * tcp_closemp_used is used below without any protection of a lock
	 * as we don't expect any one else to use it concurrently at this
	 * point otherwise it would be a major defect.
	 */

	if (mp->b_prev == NULL)
		tcp->tcp_closemp_used = B_TRUE;
	else
		cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
		    "connp %p tcp %p\n", (void *)connp, (void *)tcp);

	TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);

	/*
	 * Cleanup any queued ioctls here. This must be done before the wq/rq
	 * are re-written by tcp_close_output().
	 */
	if (conn_ioctl_cleanup_reqd)
		conn_ioctl_cleanup(connp);

	/*
	 * As CONN_CLOSING is set, no further ioctls should be passed down to
	 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
	 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
	 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
	 * was still in flight at the time, we wait for it here. See comments
	 * for CONN_INC_IOCTLREF in ip.h for details.
	 */
	mutex_enter(&connp->conn_lock);
	while (connp->conn_ioctlref > 0)
		cv_wait(&connp->conn_cv, &connp->conn_lock);
	ASSERT(connp->conn_ioctlref == 0);
	ASSERT(connp->conn_oper_pending_ill == NULL);
	mutex_exit(&connp->conn_lock);

	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);

	/*
	 * For non-STREAMS sockets, the normal case is that the conn makes
	 * an upcall when it's finally closed, so there is no need to wait
	 * in the protocol. But in case of SO_LINGER the thread sleeps here
	 * so it can properly deal with the thread being interrupted.
	 */
	if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
		goto nowait;

	mutex_enter(&tcp->tcp_closelock);
	while (!tcp->tcp_closed) {
		if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
			/*
			 * The cv_wait_sig() was interrupted. We now do the
			 * following:
			 *
			 * 1) If the endpoint was lingering, we allow this
			 * to be interrupted by cancelling the linger timeout
			 * and closing normally.
			 *
			 * 2) Revert to calling cv_wait()
			 *
			 * We revert to using cv_wait() to avoid an
			 * infinite loop which can occur if the calling
			 * thread is higher priority than the squeue worker
			 * thread and is bound to the same cpu.
			 */
			if (connp->conn_linger && connp->conn_lingertime > 0) {
				mutex_exit(&tcp->tcp_closelock);
				/* Entering squeue, bump ref count. */
				CONN_INC_REF(connp);
				bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
				SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
				    tcp_linger_interrupted, connp, NULL,
				    tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
				mutex_enter(&tcp->tcp_closelock);
			}
			break;
		}
	}
	while (!tcp->tcp_closed)
		cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
	mutex_exit(&tcp->tcp_closelock);

	/*
	 * In the case of listener streams that have eagers in the q or q0
	 * we wait for the eagers to drop their reference to us. conn_rq and
	 * conn_wq of the eagers point to our queues. By waiting for the
	 * refcnt to drop to 1, we are sure that the eagers have cleaned
	 * up their queue pointers and also dropped their references to us.
	 *
	 * For non-STREAMS sockets we do not have to wait here; the
	 * listener will instead make a su_closed upcall when the last
	 * reference is dropped.
	 */
	if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
		mutex_enter(&connp->conn_lock);
		while (connp->conn_ref != 1) {
			cv_wait(&connp->conn_cv, &connp->conn_lock);
		}
		mutex_exit(&connp->conn_lock);
	}

nowait:
	connp->conn_cpid = NOPID;
}
/*
 * Called by tcp_close() routine via squeue when lingering is
 * interrupted by a signal.
 */
static void
tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;

	freeb(mp);
	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
		tcp->tcp_client_errno = EINTR;
	}
}
/*
 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
 * Some stream heads get upset if they see these later on as anything but NULL.
 */
void
tcp_close_mpp(mblk_t **mpp)
{
	mblk_t	*mp;

	if ((mp = *mpp) != NULL) {
		do {
			mp->b_next = NULL;
			mp->b_prev = NULL;
		} while ((mp = mp->b_cont) != NULL);

		mp = *mpp;
		*mpp = NULL;
		freemsg(mp);
	}
}
/* Do detached close. */
void
tcp_close_detached(tcp_t *tcp)
{

	/*
	 * Clustering code serializes TCP disconnect callbacks and
	 * cluster tcp list walks by blocking a TCP disconnect callback
	 * if a cluster tcp list walk is in progress. This ensures
	 * accurate accounting of TCPs in the cluster code even though
	 * the TCP list walk itself is not atomic.
	 */
	tcp_closei_local(tcp);
	CONN_DEC_REF(tcp->tcp_connp);
}
/*
 * The tcp_t is going away. Remove it from all lists and set it
 * to TCPS_CLOSED. The freeing up of memory is deferred until
 * tcp_inactive. This is needed since a thread in tcp_rput might have
 * done a CONN_INC_REF on this structure before it was removed from the
 * hashes.
 */
void
tcp_closei_local(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;

	if (!TCP_IS_SOCKET(tcp))
		tcp_acceptor_hash_remove(tcp);

	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	/*
	 * This can be called via tcp_time_wait_processing() if TCP gets a
	 * SYN with sequence number outside the TIME-WAIT connection's
	 * window. So we need to check for TIME-WAIT state here as the
	 * connection counter is already decremented. See SET_TIME_WAIT()
	 * macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	/*
	 * If we are an eager connection hanging off a listener that
	 * hasn't formally accepted the connection yet, get off its
	 * list and blow off any data that we have accumulated.
	 */
	if (tcp->tcp_listener != NULL) {
		tcp_t	*listener = tcp->tcp_listener;
		mutex_enter(&listener->tcp_eager_lock);
		/*
		 * tcp_tconnind_started == B_TRUE means that the
		 * conn_ind has already gone to listener. At
		 * this point, eager will be closed but we
		 * leave it in listeners eager list so that
		 * if listener decides to close without doing
		 * accept, we can clean this up. In tcp_tli_accept
		 * we take care of the case of accept on closed
		 * eager.
		 */
		if (!tcp->tcp_tconnind_started) {
			tcp_eager_unlink(tcp);
			mutex_exit(&listener->tcp_eager_lock);
			/*
			 * We don't want to have any pointers to the
			 * listener queue, after we have released our
			 * reference on the listener
			 */
			ASSERT(tcp->tcp_detached);
			connp->conn_rq = NULL;
			connp->conn_wq = NULL;
			CONN_DEC_REF(listener->tcp_connp);
		} else {
			mutex_exit(&listener->tcp_eager_lock);
		}
	}

	/* Stop all the timers */
	tcp_timers_stop(tcp);

	if (tcp->tcp_state == TCPS_LISTEN) {
		if (tcp->tcp_ip_addr_cache) {
			kmem_free((void *)tcp->tcp_ip_addr_cache,
			    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
			tcp->tcp_ip_addr_cache = NULL;
		}
	}

	/* Decrement listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped)
		tcp_clrqfull(tcp);
	mutex_exit(&tcp->tcp_non_sq_lock);

	tcp_bind_hash_remove(tcp);
	/*
	 * If the tcp_time_wait_collector (which runs outside the squeue)
	 * is trying to remove this tcp from the time wait list, we will
	 * block in tcp_time_wait_remove while trying to acquire the
	 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
	 * requires the ipcl_hash_remove to be ordered after the
	 * tcp_time_wait_remove for the refcnt checks to work correctly.
	 */
	if (tcp->tcp_state == TCPS_TIME_WAIT)
		(void) tcp_time_wait_remove(tcp, NULL);
	CL_INET_DISCONNECT(connp);
	ipcl_hash_remove(connp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_CLOSED;
	/* Need to probe before ixa_cleanup() is called */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);
	ixa_cleanup(connp->conn_ixa);

	/*
	 * Mark the conn as CONDEMNED
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CONDEMNED;
	mutex_exit(&connp->conn_lock);

	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	tcp_ipsec_cleanup(tcp);
}
/*
 * tcp is dying (called from ipcl_conn_destroy and error cases).
 * Free the tcp_t in either case.
 */
void
tcp_free(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(tcp != NULL);
	ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);

	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	tcp_close_mpp(&tcp->tcp_reass_head);
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
	}
	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
	}

	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}

	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/*
	 * Following is really blowing away a union.
	 * It happens to have exactly two members of identical size,
	 * so the following code is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	/*
	 * If this is a non-STREAMS socket still holding on to an upper
	 * handle, release it. As a result of fallback we might also see
	 * STREAMS based conns with upper handles, in which case there is
	 * nothing to do other than clearing the field.
	 */
	if (connp->conn_upper_handle != NULL) {
		if (IPCL_IS_NONSTR(connp)) {
			(*connp->conn_upcalls->su_closed)(
			    connp->conn_upper_handle);
			tcp->tcp_detached = B_TRUE;
		}
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
/*
 * tcp_get_conn/tcp_free_conn
 *
 * tcp_get_conn is used to get a clean tcp connection structure.
 * It tries to reuse the connections put on the freelist by the
 * time_wait_collector failing which it goes to kmem_cache. This
 * way has two benefits compared to just allocating from and
 * freeing to kmem_cache.
 * 1) The time_wait_collector can free (which includes the cleanup)
 * outside the squeue. So when the interrupt comes, we have a clean
 * connection sitting in the freelist. Obviously, this buys us
 * performance.
 *
 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
 * has multiple disadvantages - tying up the squeue during alloc.
 * But allocating the conn/tcp in IP land is also not the best since
 * we can't check the 'q' and 'q0' which are protected by squeue and
 * blindly allocate memory which might have to be freed here if we are
 * not allowed to accept the connection. By using the freelist and
 * putting the conn/tcp back in freelist, we don't pay a penalty for
 * allocating memory without checking 'q/q0' and freeing it if we can't
 * accept the connection.
 *
 * Care should be taken to put the conn back in the same squeue's freelist
 * from which it was allocated. Best results are obtained if conn is
 * allocated from listener's squeue and freed to the same. Time wait
 * collector will free up the freelist if the connection ends up sitting
 * there for too long.
 */
void *
tcp_get_conn(void *arg, tcp_stack_t *tcps)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	squeue_t	*sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait;
	netstack_t	*ns;
	mblk_t		*tcp_rsrv_mp = NULL;

	tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp = tcp_time_wait->tcp_free_list;
	ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
	if (tcp != NULL) {
		tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
		tcp_time_wait->tcp_free_list_cnt--;
		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
		tcp->tcp_time_wait_next = NULL;
		connp = tcp->tcp_connp;
		connp->conn_flags |= IPCL_REUSED;

		ASSERT(tcp->tcp_tcps == NULL);
		ASSERT(connp->conn_netstack == NULL);
		ASSERT(tcp->tcp_rsrv_mp != NULL);
		ns = tcps->tcps_netstack;
		netstack_hold(ns);
		connp->conn_netstack = ns;
		connp->conn_ixa->ixa_ipst = ns->netstack_ip;
		tcp->tcp_tcps = tcps;
		ipcl_globalhash_insert(connp);

		connp->conn_ixa->ixa_notify_cookie = tcp;
		ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
		connp->conn_recv = tcp_input_data;
		ASSERT(connp->conn_recvicmp == tcp_icmp_input);
		ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
		return ((void *)connp);
	}
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
	/*
	 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
	 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
	 */
	tcp_rsrv_mp = allocb(0, BPRI_HI);
	if (tcp_rsrv_mp == NULL)
		return (NULL);

	if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
	    tcps->tcps_netstack)) == NULL) {
		freeb(tcp_rsrv_mp);
		return (NULL);
	}

	tcp = connp->conn_tcp;
	tcp->tcp_rsrv_mp = tcp_rsrv_mp;
	mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);

	tcp->tcp_tcps = tcps;

	connp->conn_recv = tcp_input_data;
	connp->conn_recvicmp = tcp_icmp_input;
	connp->conn_verifyicmp = tcp_verifyicmp;

	/*
	 * Register tcp_notify to listen to capability changes detected by IP.
	 * This upcall is made in the context of the call to conn_ip_output
	 * thus it is inside the squeue.
	 */
	connp->conn_ixa->ixa_notify = tcp_notify;
	connp->conn_ixa->ixa_notify_cookie = tcp;

	return ((void *)connp);
}
/*
 * Handle connect to IPv4 destinations, including connections for AF_INET6
 * sockets connecting to IPv4 mapped IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
    uint_t srcid)
{
	ipaddr_t	dstaddr = *dstaddrp;
	in_port_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_ipversion == IPV4_VERSION);

	/* Check for attempt to connect to INADDR_ANY */
	if (dstaddr == INADDR_ANY) {
		/*
		 * SunOS 4.x and 4.3 BSD allow an application
		 * to connect a TCP socket to INADDR_ANY.
		 * When they do this, the kernel picks the
		 * address of one interface and uses it
		 * instead. The kernel usually ends up
		 * picking the address of the loopback
		 * interface. This is an undocumented feature.
		 * However, we provide the same thing here
		 * in order to have source and binary
		 * compatibility with SunOS 4.x.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		dstaddr = htonl(INADDR_LOOPBACK);
		*dstaddrp = dstaddr;
	}

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), tcps->tcps_netstack);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v4(connp));
}
/*
 * Handle connect to IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
    uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
{
	in_port_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_family == AF_INET6);

	/*
	 * If we're here, it means that the destination address is a native
	 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
	 * reason why it might not be IPv6 is if the socket was bound to an
	 * IPv4-mapped IPv6 address.
	 */
	if (connp->conn_ipversion != IPV6_VERSION)
		return (-TBADADDR);

	/*
	 * Interpret a zero destination to mean loopback.
	 * Update the T_CONN_REQ (sin/sin6) since it is used to
	 * generate the T_CONN_CON.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
		*dstaddrp = ipv6_loopback;

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), tcps->tcps_netstack);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	/*
	 * Take care of the scope_id now.
	 */
	if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
		connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
		connp->conn_ixa->ixa_scopeid = scope_id;
	} else {
		connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
	}

	connp->conn_flowinfo = flowinfo;
	connp->conn_faddr_v6 = *dstaddrp;
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v6(connp));
}
/*
 * Note that unlike other functions this returns a positive tli error
 * when it fails; it never returns an errno.
 */
static int
tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
{
	conn_t		*lconnp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
	 * when the stream is in BOUND state. Do not send a reset,
	 * since the destination IP address is not valid, and it can
	 * be the initialized value of all zeros (broadcast address).
	 */
	if (tcp->tcp_state <= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_disconnect: bad state, %d", tcp->tcp_state);
		}
		return (TOUTSTATE);
	} else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
		TCPS_CONN_DEC(tcps);
	}

	if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
		/*
		 * According to TPI, for non-listeners, ignore seqnum
		 * in disconnect.
		 *
		 * Following interpretation of -1 seqnum is historical
		 * and implied TPI ? (TPI only states that for T_CONN_IND,
		 * a valid seqnum should not be -1).
		 *
		 * -1 means disconnect everything
		 * regardless even on a listener.
		 */

		int old_state = tcp->tcp_state;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		/*
		 * The connection can't be on the tcp_time_wait_head list
		 * since it is not detached.
		 */
		ASSERT(tcp->tcp_time_wait_next == NULL);
		ASSERT(tcp->tcp_time_wait_prev == NULL);
		ASSERT(tcp->tcp_time_wait_expire == 0);

		/*
		 * If it used to be a listener, check to make sure no one else
		 * has taken the port before switching back to LISTEN state.
		 */
		if (connp->conn_ipversion == IPV4_VERSION) {
			lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
			    connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
		} else {
			uint_t ifindex = 0;

			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
				ifindex = connp->conn_ixa->ixa_scopeid;

			/* Allow conn_bound_if listeners? */
			lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
			    &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
			    ipst);
		}
		if (tcp->tcp_conn_req_max && lconnp == NULL) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
			    NULL, int32_t, old_state);
		} else if (old_state > TCPS_BOUND) {
			tcp->tcp_conn_req_max = 0;
			tcp->tcp_state = TCPS_BOUND;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
			    NULL, int32_t, old_state);

			/*
			 * If this end point is not going to become a listener,
			 * decrement the listener connection count if
			 * necessary. Note that we do not do this if it is
			 * going to be a listener (the above if case) since
			 * then it may remove the counter struct.
			 */
			if (tcp->tcp_listen_cnt != NULL)
				TCP_DECR_LISTEN_CNT(tcp);
		}
		if (lconnp != NULL)
			CONN_DEC_REF(lconnp);
		switch (old_state) {
		case TCPS_SYN_SENT:
		case TCPS_SYN_RCVD:
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
			break;
		case TCPS_ESTABLISHED:
		case TCPS_CLOSE_WAIT:
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
			break;
		}

		mutex_enter(&tcp->tcp_eager_lock);
		if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
		    (tcp->tcp_conn_req_cnt_q != 0)) {
			tcp_eager_cleanup(tcp, 0);
		}
		mutex_exit(&tcp->tcp_eager_lock);

		tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
		    tcp->tcp_rnxt, TH_RST | TH_ACK);

		tcp_reinit(tcp);

		return (0);
	} else if (!tcp_eager_blowoff(tcp, seqnum)) {
		return (TBADSEQ);
	}
	return (0);
}
/*
 * Our client hereby directs us to reject the connection request
 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
 * of sending the appropriate RST, not an ICMP error.
 */
void
tcp_disconnect(tcp_t *tcp, mblk_t *mp)
{
	t_scalar_t seqnum;
	int	error;
	conn_t	*connp = tcp->tcp_connp;

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}
	seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
	error = tcp_disconnect_common(tcp, seqnum);
	if (error != 0)
		tcp_err_ack(tcp, mp, error, 0);
	else {
		if (tcp->tcp_state >= TCPS_ESTABLISHED) {
			/* Send M_FLUSH according to TPI */
			(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
		}
		mp = mi_tpi_ok_ack_alloc(mp);
		if (mp != NULL)
			putnext(connp->conn_rq, mp);
	}
}
/*
 * Handle reinitialization of a tcp structure.
 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
 */
static void
tcp_reinit(tcp_t *tcp)
{
	mblk_t		*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	int32_t		oldstate;

	/* tcp_reinit should never be called for detached tcp_t's */
	ASSERT(tcp->tcp_listener == NULL);
	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	/* Cancel outstanding timers */
	tcp_timers_stop(tcp);

	/*
	 * Reset everything in the state vector, after updating global
	 * MIB data from instance counters.
	 */
	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	if (tcp->tcp_snd_zcopy_aware)
		tcp_zcopy_notify(tcp);
	tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
	tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
		tcp_clrqfull(tcp);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	tcp_close_mpp(&tcp->tcp_reass_head);
	tcp->tcp_reass_tail = NULL;
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
	}
	tcp->tcp_rcv_last_tail = NULL;

	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mp = NULL;
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mark_mp = NULL;
	}
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	/*
	 * Following is a union with two members which are
	 * identical types and size so the following cleanup
	 * is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	CL_INET_DISCONNECT(connp);

	/*
	 * The connection can't be on the tcp_time_wait_head list
	 * since it is not detached.
	 */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	/*
	 * Reset/preserve other values
	 */
	tcp_reinit_values(tcp);
	ipcl_hash_remove(connp);
	/* Note that ixa_cred gets cleared in ixa_cleanup */
	ixa_cleanup(connp->conn_ixa);
	tcp_ipsec_cleanup(tcp);

	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	oldstate = tcp->tcp_state;

	if (tcp->tcp_conn_req_max != 0) {
		/*
		 * This is the case when a TLI program uses the same
		 * transport end point to accept a connection.  This
		 * makes the TCP both a listener and acceptor.  When
		 * this connection is closed, we need to set the state
		 * back to TCPS_LISTEN.  Make sure that the eager list
		 * is reinitialized.
		 *
		 * Note that this stream is still bound to the four
		 * tuples of the previous connection in IP.  If a new
		 * SYN with different foreign address comes in, IP will
		 * not find it and will send it to the global queue.  In
		 * the global queue, TCP will do a tcp_lookup_listener()
		 * to find this stream.  This works because this stream
		 * is only removed from connected hash.
		 */
		tcp->tcp_state = TCPS_LISTEN;
		tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
		tcp->tcp_eager_next_drop_q0 = tcp;
		tcp->tcp_eager_prev_drop_q0 = tcp;
		/*
		 * Initially set conn_recv to tcp_input_listener_unbound to try
		 * to pick a good squeue for the listener when the first SYN
		 * arrives.  tcp_input_listener_unbound sets it to
		 * tcp_input_listener on that first SYN.
		 */
		connp->conn_recv = tcp_input_listener_unbound;

		connp->conn_proto = IPPROTO_TCP;
		connp->conn_faddr_v6 = ipv6_all_zeros;
		connp->conn_fport = 0;

		(void) ipcl_bind_insert(connp);
	} else {
		tcp->tcp_state = TCPS_BOUND;
	}

	/*
	 * Initialize to default values
	 */
	tcp_init_values(tcp, NULL);

	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	ASSERT(tcp->tcp_ptpbhn != NULL);
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
}
/*
 * Force values to zero that need be zero.
 * Do not touch values associated with the BOUND or LISTEN state
 * since the connection will end up in that state after the reinit.
 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
 * structure!
 */
static void
tcp_reinit_values(tcp)
	tcp_t *tcp;
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

#define	DONTCARE(x)	((x) = (x))
#define	PRESERVE(x)	((x) = (x))
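	/*
	 * Illustrative note (not from the original source): both macros
	 * expand to a self-assignment, so they generate no real work; they
	 * simply force a named reference to the field they wrap.  That lets
	 * this function be audited against the tcp_t definition by checking
	 * that every field appears in exactly one PRESERVE(), DONTCARE(),
	 * ASSERT() or explicit reset below.
	 */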
	PRESERVE(tcp->tcp_bind_hash_port);
	PRESERVE(tcp->tcp_bind_hash);
	PRESERVE(tcp->tcp_ptpbhn);
	PRESERVE(tcp->tcp_acceptor_hash);
	PRESERVE(tcp->tcp_ptpahn);

	/* Should be ASSERT NULL on these with new code! */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);
	PRESERVE(tcp->tcp_state);
	PRESERVE(connp->conn_rq);
	PRESERVE(connp->conn_wq);

	ASSERT(tcp->tcp_xmit_head == NULL);
	ASSERT(tcp->tcp_xmit_last == NULL);
	ASSERT(tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_xmit_tail == NULL);
	ASSERT(tcp->tcp_xmit_tail_unsent == 0);

	tcp->tcp_snxt = 0;			/* Displayed in mib */
	tcp->tcp_suna = 0;			/* Displayed in mib */

	DONTCARE(tcp->tcp_cwnd);	/* Init in tcp_process_options */

	ASSERT(tcp->tcp_ibsegs == 0);
	ASSERT(tcp->tcp_obsegs == 0);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	DONTCARE(tcp->tcp_naglim);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ipha);
	DONTCARE(tcp->tcp_ip6h);
	DONTCARE(tcp->tcp_tcpha);
	tcp->tcp_valid_bits = 0;

	DONTCARE(tcp->tcp_timer_backoff);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_last_recv_time);	/* Init in tcp_init_values */
	tcp->tcp_last_rcv_lbolt = 0;

	tcp->tcp_init_cwnd = 0;

	tcp->tcp_urp_last_valid = 0;
	tcp->tcp_hard_binding = 0;

	tcp->tcp_fin_acked = 0;
	tcp->tcp_fin_rcvd = 0;
	tcp->tcp_fin_sent = 0;
	tcp->tcp_ordrel_done = 0;

	tcp->tcp_detached = 0;

	tcp->tcp_snd_ws_ok = B_FALSE;
	tcp->tcp_snd_ts_ok = B_FALSE;
	tcp->tcp_zero_win_probe = 0;

	tcp->tcp_loopback = 0;
	tcp->tcp_localnet = 0;
	tcp->tcp_syn_defense = 0;
	tcp->tcp_set_timer = 0;

	tcp->tcp_active_open = 0;
	tcp->tcp_rexmit = B_FALSE;
	tcp->tcp_xmit_zc_clean = B_FALSE;

	tcp->tcp_snd_sack_ok = B_FALSE;
	tcp->tcp_hwcksum = B_FALSE;

	DONTCARE(tcp->tcp_maxpsz_multiplier);	/* Init in tcp_init_values */

	tcp->tcp_conn_def_q0 = 0;
	tcp->tcp_ip_forward_progress = B_FALSE;
	tcp->tcp_ecn_ok = B_FALSE;

	tcp->tcp_cwr = B_FALSE;
	tcp->tcp_ecn_echo_on = B_FALSE;
	tcp->tcp_is_wnd_shrnk = B_FALSE;

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	tcp->tcp_rcv_ws = 0;
	tcp->tcp_snd_ws = 0;
	tcp->tcp_ts_recent = 0;
	tcp->tcp_rnxt = 0;			/* Displayed in mib */
	DONTCARE(tcp->tcp_rwnd);		/* Set in tcp_reinit() */
	tcp->tcp_initial_pmtu = 0;

	ASSERT(tcp->tcp_reass_head == NULL);
	ASSERT(tcp->tcp_reass_tail == NULL);

	tcp->tcp_cwnd_cnt = 0;

	ASSERT(tcp->tcp_rcv_list == NULL);
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);

	DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
	DONTCARE(tcp->tcp_cwnd_max);		/* Init in tcp_init_values */

	tcp->tcp_rto = 0;			/* Displayed in MIB */
	DONTCARE(tcp->tcp_rtt_sa);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_rtt_sd);		/* Init in tcp_init_values */
	tcp->tcp_rtt_update = 0;

	DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
	DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */

	tcp->tcp_rack = 0;			/* Displayed in mib */
	tcp->tcp_rack_cnt = 0;
	tcp->tcp_rack_cur_max = 0;
	tcp->tcp_rack_abs_max = 0;

	tcp->tcp_max_swnd = 0;

	ASSERT(tcp->tcp_listener == NULL);

	DONTCARE(tcp->tcp_irs);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_iss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_fss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_urg);			/* tcp_valid_bits cleared */

	ASSERT(tcp->tcp_conn_req_cnt_q == 0);
	ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
	PRESERVE(tcp->tcp_conn_req_max);
	PRESERVE(tcp->tcp_conn_req_seqnum);

	DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */

	DONTCARE(tcp->tcp_urp_last);	/* tcp_urp_last_valid is cleared */
	ASSERT(tcp->tcp_urp_mp == NULL);
	ASSERT(tcp->tcp_urp_mark_mp == NULL);
	ASSERT(tcp->tcp_fused_sigurg_mp == NULL);

	ASSERT(tcp->tcp_eager_next_q == NULL);
	ASSERT(tcp->tcp_eager_last_q == NULL);
	ASSERT((tcp->tcp_eager_next_q0 == NULL &&
	    tcp->tcp_eager_prev_q0 == NULL) ||
	    tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);

	ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
	    tcp->tcp_eager_prev_drop_q0 == NULL) ||
	    tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);

	tcp->tcp_client_errno = 0;

	DONTCARE(connp->conn_sum);		/* Init in tcp_init_values */

	connp->conn_faddr_v6 = ipv6_all_zeros;	/* Displayed in MIB */

	PRESERVE(connp->conn_bound_addr_v6);
	tcp->tcp_last_sent_len = 0;
	tcp->tcp_dupack_cnt = 0;

	connp->conn_fport = 0;			/* Displayed in MIB */
	PRESERVE(connp->conn_lport);

	PRESERVE(tcp->tcp_acceptor_lockp);

	ASSERT(tcp->tcp_ordrel_mp == NULL);
	PRESERVE(tcp->tcp_acceptor_id);
	DONTCARE(tcp->tcp_ipsec_overhead);

	PRESERVE(connp->conn_family);
	/* Remove any remnants of mapped address binding */
	if (connp->conn_family == AF_INET6) {
		connp->conn_ipversion = IPV6_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	}

	connp->conn_bound_if = 0;
	connp->conn_recv_ancillary.crb_all = 0;
	tcp->tcp_recvifindex = 0;
	tcp->tcp_recvhops = 0;
	tcp->tcp_closed = 0;
	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	tcp->tcp_lso = B_FALSE;

	tcp->tcp_in_ack_unsent = 0;
	tcp->tcp_cork = B_FALSE;
	tcp->tcp_tconnind_started = B_FALSE;

	PRESERVE(tcp->tcp_squeue_bytes);

	tcp->tcp_closemp_used = B_FALSE;

	PRESERVE(tcp->tcp_rsrv_mp);
	PRESERVE(tcp->tcp_rsrv_mp_lock);

	DONTCARE(tcp->tcmp_stk[0]);

	PRESERVE(tcp->tcp_connid);

	ASSERT(tcp->tcp_listen_cnt == NULL);
	ASSERT(tcp->tcp_reass_tid == 0);
}
/*
 * Initialize the various fields in tcp_t.  If parent (the listener) is
 * non-NULL, certain values will be inherited from it.
 */
void
tcp_init_values(tcp_t *tcp, tcp_t *parent)
{
	uint32_t	rto;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (parent == NULL) {
		tcp->tcp_naglim = tcps->tcps_naglim_def;

		tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
		tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
		tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;

		tcp->tcp_first_ctimer_threshold =
		    tcps->tcps_ip_notify_cinterval;
		tcp->tcp_second_ctimer_threshold =
		    tcps->tcps_ip_abort_cinterval;
		tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
		tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;

		tcp->tcp_fin_wait_2_flush_interval =
		    tcps->tcps_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
		tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;

		/*
		 * Default value of tcp_init_cwnd is 0, so no need to set here
		 * if parent is NULL.  But we need to inherit it from parent.
		 */
	} else {
		/* Inherit various TCP parameters from the parent. */
		tcp->tcp_naglim = parent->tcp_naglim;

		tcp->tcp_rto_initial = parent->tcp_rto_initial;
		tcp->tcp_rto_min = parent->tcp_rto_min;
		tcp->tcp_rto_max = parent->tcp_rto_max;

		tcp->tcp_first_ctimer_threshold =
		    parent->tcp_first_ctimer_threshold;
		tcp->tcp_second_ctimer_threshold =
		    parent->tcp_second_ctimer_threshold;
		tcp->tcp_first_timer_threshold =
		    parent->tcp_first_timer_threshold;
		tcp->tcp_second_timer_threshold =
		    parent->tcp_second_timer_threshold;

		tcp->tcp_fin_wait_2_flush_interval =
		    parent->tcp_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = parent->tcp_ka_interval;
		tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;

		tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
	}

	/*
	 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
	 * will be close to tcp_rexmit_interval_initial.  By doing this, we
	 * allow the algorithm to adjust slowly to large fluctuations of RTT
	 * during first few transmissions of a connection as seen in slow
	 * links.
	 */
	tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
	tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
	rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
	    tcps->tcps_conn_grace_period;
	TCP_SET_RTO(tcp, rto);
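	/*
	 * Illustrative sketch (not from the original source): assuming a
	 * purely hypothetical tcp_rto_initial of 1000 ms and the extra and
	 * grace terms taken as 0, the seeding above works out to
	 *
	 *	tcp_rtt_sa = 1000 << 2 = 4000
	 *	tcp_rtt_sd = 1000 >> 1 = 500
	 *	rto        = (4000 >> 3) + 500 + 0 + (4000 >> 5) + 0
	 *	           = 500 + 500 + 125 = 1125 ms
	 *
	 * i.e. roughly 1.125 * tcp_rto_initial, which is why the calculated
	 * RTO starts out close to the configured initial retransmit interval.
	 */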
	tcp->tcp_timer_backoff = 0;
	tcp->tcp_ms_we_have_waited = 0;
	tcp->tcp_last_recv_time = ddi_get_lbolt();
	tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
	tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	tcp->tcp_snd_burst = TCP_CWND_INFINITE;

	tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;

	/* NOTE:  ISS is now set in tcp_set_destination(). */

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	/* We rebuild the header template on the next connect/conn_request */

	connp->conn_mlp_type = mlptSingle;

	/*
	 * Init the window scale to the max so tcp_rwnd_set() won't pare
	 * down tcp_rwnd.  tcp_set_destination() will set the right value
	 * later.
	 */
	tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
	tcp->tcp_rwnd = connp->conn_rcvbuf;

	tcp->tcp_cork = B_FALSE;

	/*
	 * Init the tcp_debug option if it wasn't already set.  This value
	 * determines whether TCP calls strlog() to print out debug messages.
	 * Doing this initialization here means that this value is not
	 * inherited thru tcp_reinit().
	 */
	if (!connp->conn_debug)
		connp->conn_debug = tcps->tcps_dbg;
}
/*
 * Update the TCP connection according to change of PMTU.
 *
 * Path MTU might have changed by either increase or decrease, so need to
 * adjust the MSS based on the value of ixa_pmtu.  No need to handle tiny
 * or negative MSS, since tcp_mss_set() will do it.
 */
void
tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
{
	uint32_t	pmtu;
	int32_t		mss;
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	iaflags_t	ixaflags;

	if (tcp->tcp_tcps->tcps_ignore_path_mtu)
		return;

	if (tcp->tcp_state < TCPS_ESTABLISHED)
		return;

	/*
	 * Always call ip_get_pmtu() to make sure that IP has updated
	 * ixa_flags properly.
	 */
	pmtu = ip_get_pmtu(ixa);
	ixaflags = ixa->ixa_flags;

	/*
	 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
	 * IPsec overhead if applied.  Make sure to use the most recent
	 * IPsec information.
	 */
	mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);

	/*
	 * Nothing to change, so just return.
	 */
	if (mss == tcp->tcp_mss)
		return;

	/*
	 * Currently, for ICMP errors, only PMTU decrease is handled.
	 */
	if (mss > tcp->tcp_mss && decrease_only)
		return;

	DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);

	/*
	 * Update ixa_fragsize and ixa_pmtu.
	 */
	ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;

	/*
	 * Adjust MSS and all relevant variables.
	 */
	tcp_mss_set(tcp, mss);

	/*
	 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
	 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF.  Since
	 * TCP has a (potentially different) min size we do the same.  Make
	 * sure to clear IXAF_DONTFRAG, which is used by IP to decide whether
	 * to fragment the packet.
	 *
	 * LSO over IPv6 can not be fragmented.  So need to disable LSO
	 * when IPv6 fragmentation is needed.
	 */
	if (mss < tcp->tcp_tcps->tcps_mss_min)
		ixaflags |= IXAF_PMTU_TOO_SMALL;

	if (ixaflags & IXAF_PMTU_TOO_SMALL)
		ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);

	if ((connp->conn_ipversion == IPV4_VERSION) &&
	    !(ixaflags & IXAF_PMTU_IPV4_DF)) {
		tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
	}
	ixa->ixa_flags = ixaflags;
}
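/*
 * Illustrative sketch (not from the original source): for a plain IPv4
 * connection with no IP options and no IPsec, conn_ht_iphc_len is
 * 20 (IP) + 20 (TCP) = 40 bytes, so a path MTU of 1500 yields
 * mss = 1500 - 40 - 0 = 1460, while a PMTU drop to 576 would yield
 * mss = 536.  tcp_mss_set() then clamps and propagates that value.
 */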
int
tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
{
	conn_t	*connp = tcp->tcp_connp;
	queue_t	*q = connp->conn_rq;
	int32_t	mss = tcp->tcp_mss;
	int	maxpsz;

	if (TCP_IS_DETACHED(tcp))
		return (mss);

	if (tcp->tcp_fused) {
		maxpsz = tcp_fuse_maxpsz(tcp);
		mss = INFPSZ;
	} else if (tcp->tcp_maxpsz_multiplier == 0) {
		/*
		 * Set the sd_qn_maxpsz according to the socket send buffer
		 * size, and sd_maxblk to INFPSZ (-1).  This will essentially
		 * instruct the stream head to copyin user data into contiguous
		 * kernel-allocated buffers without breaking it up into smaller
		 * chunks.  We round up the buffer size to the nearest SMSS.
		 */
		maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
		mss = INFPSZ;
	} else {
		/*
		 * Set sd_qn_maxpsz to approx half the (receivers) buffer
		 * (and a multiple of the mss).  This instructs the stream
		 * head to break down larger than SMSS writes into SMSS-
		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
		 */
		maxpsz = tcp->tcp_maxpsz_multiplier * mss;
		if (maxpsz > connp->conn_sndbuf / 2) {
			maxpsz = connp->conn_sndbuf / 2;
			/* Round up to nearest mss */
			maxpsz = MSS_ROUNDUP(maxpsz, mss);
		}
	}

	(void) proto_set_maxpsz(q, connp, maxpsz);
	if (!(IPCL_IS_NONSTR(connp)))
		connp->conn_wq->q_maxpsz = maxpsz;
	if (set_maxblk)
		(void) proto_set_tx_maxblk(q, connp, mss);
	return (maxpsz);
}
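/*
 * Illustrative sketch (not from the original source), assuming a
 * hypothetical MSS of 1460 bytes and tcp_maxpsz_multiplier of 10:
 * the preferred maxpsz is 10 * 1460 = 14600 bytes.  If the send buffer
 * were only 16384 bytes, half of it (8192) is smaller, so maxpsz is
 * clamped to 8192 and then rounded up to a multiple of the MSS,
 * MSS_ROUNDUP(8192, 1460) = 6 * 1460 = 8760 bytes.
 */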
/* For /dev/tcp aka AF_INET open */
int
tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
}

/* For /dev/tcp6 aka AF_INET6 open */
int
tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
}
conn_t *
tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
    int *errorp)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp;
	squeue_t	*sqp;
	zoneid_t	zoneid;
	tcp_stack_t	*tcps;
	int		err;

	ASSERT(errorp != NULL);
	/*
	 * Find the proper zoneid and netstack.
	 */
	/*
	 * Special case for install: miniroot needs to be able to
	 * access files via NFS as though it were always in the
	 * global zone.
	 */
	if (credp == kcred && nfs_global_client_only != 0) {
		zoneid = GLOBAL_ZONEID;
		tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
		    netstack_tcp;
		ASSERT(tcps != NULL);
	} else {
		netstack_t *ns;

		if ((err = secpolicy_basic_net_access(credp)) != 0) {
			*errorp = err;
			return (NULL);
		}

		ns = netstack_find_by_cred(credp);
		ASSERT(ns != NULL);
		tcps = ns->netstack_tcp;
		ASSERT(tcps != NULL);

		/*
		 * For exclusive stacks we set the zoneid to zero
		 * to make TCP operate as if in the global zone.
		 */
		if (tcps->tcps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID)
			zoneid = GLOBAL_ZONEID;
		else
			zoneid = crgetzoneid(credp);
	}

	sqp = IP_SQUEUE_GET((uint_t)gethrtime());
	connp = (conn_t *)tcp_get_conn(sqp, tcps);
	/*
	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
	 * so we drop it by one.
	 */
	netstack_rele(tcps->tcps_netstack);
	if (connp == NULL) {
		*errorp = ENOSR;
		return (NULL);
	}
	ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);

	connp->conn_sqp = sqp;
	connp->conn_initial_sqp = connp->conn_sqp;
	connp->conn_ixa->ixa_sqp = connp->conn_sqp;
	tcp = connp->conn_tcp;

	/*
	 * Besides asking IP to set the checksum for us, have conn_ip_output
	 * to do the following checks when necessary:
	 *
	 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
	 * IXAF_VERIFY_PMTU: verify PMTU changes
	 * IXAF_VERIFY_LSO: verify LSO capability changes
	 */
	connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;

	if (!tcps->tcps_dev_flow_ctl)
		connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	if (isv6) {
		connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_family = AF_INET6;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
		connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		connp->conn_family = AF_INET;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
		connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
	}
	connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;

	connp->conn_cred = credp;
	connp->conn_cpid = curproc->p_pid;
	connp->conn_open_time = ddi_get_lbolt64();

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = credp;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;

	connp->conn_zoneid = zoneid;
	/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
	connp->conn_ixa->ixa_zoneid = zoneid;
	connp->conn_mlp_type = mlptSingle;
	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
	ASSERT(tcp->tcp_tcps == tcps);

	/*
	 * If the caller has the process-wide flag set, then default to MAC
	 * exempt mode.  This allows read-down to unlabeled hosts.
	 */
	if (getpflags(NET_MAC_AWARE, credp) != 0)
		connp->conn_mac_mode = CONN_MAC_AWARE;

	connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);

	if (issocket)
		tcp->tcp_issocket = 1;

	connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
	connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
	connp->conn_sndlowat = tcps->tcps_xmit_lowat;
	connp->conn_so_type = SOCK_STREAM;
	connp->conn_wroff = connp->conn_ht_iphc_allocated +
	    tcps->tcps_wroff_xtra;

	SOCK_CONNID_INIT(tcp->tcp_connid);
	/* DTrace ignores this - it isn't a tcp:::state-change */
	tcp->tcp_state = TCPS_IDLE;
	tcp_init_values(tcp, NULL);
	return (connp);
}
static int
tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
    boolean_t isv6)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	int		err;
	vmem_t		*minor_arena = NULL;
	dev_t		conn_dev;
	boolean_t	issocket;

	if (q->q_ptr != NULL)
		return (0);

	if (sflag == MODOPEN)
		return (EINVAL);

	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
		minor_arena = ip_minor_arena_la;
	} else {
		/*
		 * Either minor numbers in the large arena were exhausted
		 * or a non socket application is doing the open.
		 * Try to allocate from the small arena.
		 */
		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
			return (EBUSY);
		}
		minor_arena = ip_minor_arena_sa;
	}

	ASSERT(minor_arena != NULL);

	*devp = makedevice(getmajor(*devp), (minor_t)conn_dev);

	if (flag & SO_FALLBACK) {
		/*
		 * Non streams socket needs a stream to fallback to
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_fallback_sock_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	} else if (flag & SO_ACCEPTOR) {
		q->q_qinfo = &tcp_acceptor_rinit;
		/*
		 * the conn_dev and minor_arena will be subsequently used by
		 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
		 * the minor device number for this connection from the q_ptr.
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_acceptor_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	}

	issocket = flag & SO_SOCKSTR;
	connp = tcp_create_common(credp, isv6, issocket, &err);

	if (connp == NULL) {
		inet_minor_free(minor_arena, conn_dev);
		q->q_ptr = WR(q)->q_ptr = NULL;
		return (err);
	}

	connp->conn_rq = q;
	connp->conn_wq = WR(q);
	q->q_ptr = WR(q)->q_ptr = connp;

	connp->conn_dev = conn_dev;
	connp->conn_minor_arena = minor_arena;

	ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
	ASSERT(WR(q)->q_qinfo == &tcp_winit);

	tcp = connp->conn_tcp;

	if (issocket) {
		WR(q)->q_qinfo = &tcp_sock_winit;
	} else {
#ifdef	_ILP32
		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
#else
		tcp->tcp_acceptor_id = conn_dev;
#endif	/* _ILP32 */
		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
	}

	/*
	 * Put the ref for TCP. Ref for IP was already put
	 * by ipcl_conn_create. Also Make the conn_t globally
	 * visible to walkers
	 */
	mutex_enter(&connp->conn_lock);
	CONN_INC_REF_LOCKED(connp);
	ASSERT(connp->conn_ref == 2);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);

	qprocson(q);
	return (0);
}
/*
 * Build/update the tcp header template (in conn_ht_iphc) based on
 * conn_xmit_ipp.  The headers include ip6_t, any extension
 * headers, and the maximum size tcp header (to avoid reallocation
 * on the fly for additional tcp options).
 *
 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
 * Returns failure if can't allocate memory.
 */
int
tcp_build_hdrs(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	char		buf[TCP_MAX_HDR_LENGTH];
	uint_t		buflen;
	uint_t		ulplen = TCP_MIN_HEADER_LENGTH;
	uint_t		extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
	tcpha_t		*tcpha;
	uint32_t	cksum;
	int		error;

	/*
	 * We might be called after the connection is set up, and we might
	 * have TS options already in the TCP header.  Thus we save any
	 * existing tcp header.
	 */
	buflen = connp->conn_ht_ulp_len;
	if (buflen != 0) {
		bcopy(connp->conn_ht_ulp, buf, buflen);
		extralen -= buflen - ulplen;
		ulplen = buflen;
	}

	/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
	mutex_enter(&connp->conn_lock);
	error = conn_build_hdr_template(connp, ulplen, extralen,
	    &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	/*
	 * Any routing header/option has been massaged. The checksum difference
	 * is stored in conn_sum for later use.
	 */
	tcpha = (tcpha_t *)connp->conn_ht_ulp;
	tcp->tcp_tcpha = tcpha;

	/* restore any old tcp header */
	if (buflen != 0) {
		bcopy(buf, connp->conn_ht_ulp, buflen);
	} else {
		tcpha->tha_offset_and_reserved = (5 << 4);
		tcpha->tha_lport = connp->conn_lport;
		tcpha->tha_fport = connp->conn_fport;
	}

	/*
	 * IP wants our header length in the checksum field to
	 * allow it to perform a single pseudo-header+checksum
	 * calculation on behalf of TCP.
	 * Include the adjustment for a source route once IP_OPTIONS is set.
	 */
	cksum = sizeof (tcpha_t) + connp->conn_sum;
	cksum = (cksum >> 16) + (cksum & 0xFFFF);
	ASSERT(cksum < 0x10000);
	tcpha->tha_sum = htons(cksum);
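	/*
	 * Illustrative sketch (not from the original source): with no source
	 * route, conn_sum is 0 and the field is simply htons(20), i.e. the
	 * TCP header length.  If a source route added, say, 0x1234 to
	 * conn_sum, the 16-bit fold above would store htons(20 + 0x1234),
	 * with any carry out of bit 16 folded back into the low 16 bits.
	 */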
	if (connp->conn_ipversion == IPV4_VERSION)
		tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
	else
		tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;

	if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
	    connp->conn_wroff) {
		connp->conn_wroff = connp->conn_ht_iphc_allocated +
		    tcps->tcps_wroff_xtra;
		(void) proto_set_tx_wroff(connp->conn_rq, connp,
		    connp->conn_wroff);
	}
	return (0);
}
/*
 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
 * We do not allow the receive window to shrink.  After setting rwnd,
 * set the flow control hiwat of the stream.
 *
 * This function is called in 2 cases:
 *
 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
 *    connection (passive open) and in tcp_input_data() for active connect.
 *    This is called after tcp_mss_set() when the desired MSS value is known.
 *    This makes sure that our window size is a multiple of the other side's
 *    MSS.
 * 2) Handling SO_RCVBUF option.
 *
 * It is ASSUMED that the requested size is a multiple of the current MSS.
 *
 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
 */
int
tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
{
	uint32_t	mss = tcp->tcp_mss;
	uint32_t	old_max_rwnd;
	uint32_t	max_transmittable_rwnd;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Insist on a receive window that is at least
	 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
	 * funny TCP interactions of Nagle algorithm, SWS avoidance
	 * and delayed acknowledgement.
	 */
	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
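	/*
	 * Illustrative sketch (not from the original source): with the
	 * default of tcp_recv_hiwat_minmss = 4 mentioned above and a
	 * hypothetical MSS of 1460 bytes, any requested receive window
	 * below 4 * 1460 = 5840 bytes is raised to 5840 here before the
	 * remaining adjustments are applied.
	 */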
	if (tcp->tcp_fused) {
		size_t sth_hiwat;
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		ASSERT(peer_tcp != NULL);
		sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
		if (!tcp_detached) {
			(void) proto_set_rx_hiwat(connp->conn_rq, connp,
			    sth_hiwat);
			tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
		}

		/* Caller could have changed tcp_rwnd; update tha_win */
		if (tcp->tcp_tcpha != NULL) {
			tcp->tcp_tcpha->tha_win =
			    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
		}
		if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
			tcp->tcp_cwnd_max = rwnd;

		/*
		 * In the fusion case, the maxpsz stream head value of
		 * our peer is set according to its send buffer size
		 * and our receive buffer size; since the latter may
		 * have changed we need to update the peer's maxpsz.
		 */
		(void) tcp_maxpsz_set(peer_tcp, B_TRUE);
		return (sth_hiwat);
	}

	if (tcp_detached)
		old_max_rwnd = tcp->tcp_rwnd;
	else
		old_max_rwnd = connp->conn_rcvbuf;

	/*
	 * If window size info has already been exchanged, TCP should not
	 * shrink the window.  Shrinking window is doable if done carefully.
	 * We may add that support later.  But so far there is not a real
	 * need to do that.
	 */
	if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
		/* MSS may have changed, do a round up again. */
		rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
	}

	/*
	 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
	 * can be applied even before the window scale option is decided.
	 */
	max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_transmittable_rwnd) {
		rwnd = max_transmittable_rwnd -
		    (max_transmittable_rwnd % mss);
		if (rwnd < mss)
			rwnd = max_transmittable_rwnd;
		/*
		 * If we're over the limit we may have to back down tcp_rwnd.
		 * The increment below won't work for us. So we set all three
		 * here and the increment below will have no effect.
		 */
		tcp->tcp_rwnd = old_max_rwnd = rwnd;
	}
	if (tcp->tcp_localnet) {
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
	} else {
		/*
		 * For a remote host on a different subnet (through a router),
		 * we ack every other packet to be conforming to RFC1122.
		 * tcp_deferred_acks_max is default to 2.
		 */
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
	}
	if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
		tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
	else
		tcp->tcp_rack_cur_max = 0;
	/*
	 * Increment the current rwnd by the amount the maximum grew (we
	 * can not overwrite it since we might be in the middle of a
	 * connection.)
	 */
	tcp->tcp_rwnd += rwnd - old_max_rwnd;
	connp->conn_rcvbuf = rwnd;

	/* Are we already connected? */
	if (tcp->tcp_tcpha != NULL) {
		tcp->tcp_tcpha->tha_win =
		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	}

	if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
		tcp->tcp_cwnd_max = rwnd;

	if (tcp_detached)
		return (rwnd);

	tcp_set_recv_threshold(tcp, rwnd >> 3);

	(void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
	return (rwnd);
}
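/*
 * Illustrative sketch (not from the original source): before window
 * scaling is negotiated tcp_rcv_ws is TCP_MAX_WINSHIFT, so almost any
 * buffer fits; but once the peer has agreed to, say, rcv_ws = 0, the
 * largest advertisable window is TCP_MAXWIN (65535).  A hypothetical
 * SO_RCVBUF request of 1 MB with a 1460-byte MSS would therefore be
 * trimmed to 65535 - (65535 % 1460) = 64240 bytes by the clamp above.
 */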
int
tcp_do_unbind(conn_t *connp)
{
	tcp_t		*tcp = connp->conn_tcp;
	int32_t		oldstate;

	switch (tcp->tcp_state) {
	case TCPS_BOUND:
	case TCPS_LISTEN:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * Need to clean up all the eagers since after the unbind, segments
	 * will no longer be delivered to this listener stream.
	 */
	mutex_enter(&tcp->tcp_eager_lock);
	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
		tcp_eager_cleanup(tcp, 0);
	}
	mutex_exit(&tcp->tcp_eager_lock);

	/* Clean up the listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);
	connp->conn_laddr_v6 = ipv6_all_zeros;
	connp->conn_saddr_v6 = ipv6_all_zeros;
	tcp_bind_hash_remove(tcp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_IDLE;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	bzero(&connp->conn_ports, sizeof (connp->conn_ports));
	return (0);
}
/*
 * Collect protocol properties to send to the upper handle.
 */
void
tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
{
	conn_t *connp = tcp->tcp_connp;

	sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
	sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);

	sopp->sopp_rxhiwat = tcp->tcp_fused ?
	    tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
	    connp->conn_rcvbuf;

	/*
	 * Determine what write offset value to use depending on SACK and
	 * whether the endpoint is fused or not.
	 */
	if (tcp->tcp_fused) {
		ASSERT(tcp->tcp_loopback);
		ASSERT(tcp->tcp_loopback_peer != NULL);
		/*
		 * For fused tcp loopback, set the stream head's write
		 * offset value to zero since we won't be needing any room
		 * for TCP/IP headers.  This would also improve performance
		 * since it would reduce the amount of work done by kmem.
		 * Non-fused tcp loopback case is handled separately below.
		 */
		sopp->sopp_wroff = 0;
		/*
		 * Update the peer's transmit parameters according to
		 * our recently calculated high water mark value.
		 */
		(void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
	} else if (tcp->tcp_snd_sack_ok) {
		sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	} else {
		sopp->sopp_wroff = connp->conn_ht_iphc_len +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	}

	if (tcp->tcp_loopback) {
		sopp->sopp_flags |= SOCKOPT_LOOPBACK;
		sopp->sopp_loopback = B_TRUE;
	}
}
/*
 * Check the usability of ZEROCOPY.  It's instead checking the flag set by IP.
 */
boolean_t
tcp_zcopy_check(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	boolean_t	zc_enabled = B_FALSE;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (do_tcpzcopy == 2)
		zc_enabled = B_TRUE;
	else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
		zc_enabled = B_TRUE;

	tcp->tcp_snd_zcopy_on = zc_enabled;
	if (!TCP_IS_DETACHED(tcp)) {
		if (zc_enabled) {
			ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		} else {
			ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	}
	return (zc_enabled);
}
/*
 * Backoff from a zero-copy message by copying data to a new allocated
 * message and freeing the original desballoca'ed segmapped message.
 *
 * This function is called by following two callers:
 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
 *    the original desballoca'ed message and notify sockfs.  This is in
 *    retransmission state.
 * 2. tcp_output: fix_xmitlist is set to B_FALSE.  Flag STRUIO_ZCNOTIFY needs
 *    to be copied to the new message.
 */
mblk_t *
tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
{
	mblk_t		*nbp;
	mblk_t		*head = NULL;
	mblk_t		*tail = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	while (bp != NULL) {
		if (IS_VMLOANED_MBLK(bp)) {
			TCP_STAT(tcps, tcp_zcopy_backoff);
			if ((nbp = copyb(bp)) == NULL) {
				tcp->tcp_xmit_zc_clean = B_FALSE;
				if (tail != NULL)
					tail->b_cont = bp;
				return ((head == NULL) ? bp : head);
			}

			if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
				if (fix_xmitlist)
					tcp_zcopy_notify(tcp);
				else
					nbp->b_datap->db_struioflag |=
					    STRUIO_ZCNOTIFY;
			}
			nbp->b_cont = bp->b_cont;

			/*
			 * Copy saved information and adjust tcp_xmit_tail
			 * if needed.
			 */
			if (fix_xmitlist) {
				nbp->b_prev = bp->b_prev;
				nbp->b_next = bp->b_next;

				if (tcp->tcp_xmit_tail == bp)
					tcp->tcp_xmit_tail = nbp;
			}

			/* Free the original message. */
			bp->b_prev = NULL;
			bp->b_next = NULL;
			freeb(bp);

			bp = nbp;
		}

		if (head == NULL)
			head = bp;

		if (tail == NULL) {
			tail = bp;
		} else {
			tail->b_cont = bp;
			tail = bp;
		}

		/* Move forward. */
		bp = bp->b_cont;
	}

	if (fix_xmitlist) {
		tcp->tcp_xmit_last = tail;
		tcp->tcp_xmit_zc_clean = B_TRUE;
	}

	return (head);
}
void
tcp_zcopy_notify(tcp_t *tcp)
{
	struct stdata	*stp;
	conn_t		*connp;

	if (tcp->tcp_detached)
		return;
	connp = tcp->tcp_connp;
	if (IPCL_IS_NONSTR(connp)) {
		(*connp->conn_upcalls->su_zcopy_notify)
		    (connp->conn_upper_handle);
		return;
	}
	stp = STREAM(connp->conn_rq);
	mutex_enter(&stp->sd_lock);
	stp->sd_flag |= STZCNOTIFY;
	cv_broadcast(&stp->sd_zcopy_wait);
	mutex_exit(&stp->sd_lock);
}
/*
 * Update the TCP connection according to change of LSO capability.
 */
static void
tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
{
	/*
	 * We check against IPv4 header length to preserve the old behavior
	 * of only enabling LSO when there are no IP options.
	 * But this restriction might not be necessary at all. Before removing
	 * it, need to verify how LSO is handled for source routing case, with
	 * which IP does software checksum.
	 *
	 * For IPv6, whenever any extension header is needed, LSO is
	 * suppressed.
	 */
	if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
		return;

	/*
	 * Either the LSO capability newly became usable, or it has changed.
	 */
	if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
		ill_lso_capab_t	*lsoc = &ixa->ixa_lso_capab;

		ASSERT(lsoc->ill_lso_max > 0);
		tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);

		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be enabled, notify the STREAM header with larger
		 * data block.
		 */
		tcp->tcp_maxpsz_multiplier = 0;

		tcp->tcp_lso = B_TRUE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
	} else { /* LSO capability is not usable any more. */
		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be disabled, notify the STREAM header with smaller
		 * data block.  And need to restore fragsize to PMTU.
		 */
		tcp->tcp_maxpsz_multiplier =
		    tcp->tcp_tcps->tcps_maxpsz_multiplier;
		ixa->ixa_fragsize = ixa->ixa_pmtu;

		tcp->tcp_lso = B_FALSE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
	}

	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
/*
 * Update the TCP connection according to change of ZEROCOPY capability.
 */
static void
tcp_update_zcopy(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_snd_zcopy_on) {
		tcp->tcp_snd_zcopy_on = B_FALSE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	} else {
		tcp->tcp_snd_zcopy_on = B_TRUE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		}
	}
}
/*
 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
 * so it's safe to update the TCP connection.
 */
static void
tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
    ixa_notify_arg_t narg)
{
	tcp_t		*tcp = (tcp_t *)arg;
	conn_t		*connp = tcp->tcp_connp;

	switch (ntype) {
	case IXAN_LSO:
		tcp_update_lso(tcp, connp->conn_ixa);
		break;
	case IXAN_PMTU:
		tcp_update_pmtu(tcp, B_FALSE);
		break;
	case IXAN_ZCOPY:
		tcp_update_zcopy(tcp);
		break;
	default:
		break;
	}
}
/*
 * The TCP write service routine should never be called...
 */
void
tcp_wsrv(queue_t *q)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	TCP_STAT(tcps, tcp_wsrv_called);
}
/*
 * Hash list lookup routine for tcp_t structures.
 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
 */
tcp_t *
tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
{
	tf_t	*tf;
	tcp_t	*tcp;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
	mutex_enter(&tf->tf_lock);
	for (tcp = tf->tf_tcp; tcp != NULL;
	    tcp = tcp->tcp_acceptor_hash) {
		if (tcp->tcp_acceptor_id == id) {
			CONN_INC_REF(tcp->tcp_connp);
			mutex_exit(&tf->tf_lock);
			return (tcp);
		}
	}
	mutex_exit(&tf->tf_lock);
	return (NULL);
}
/*
 * Hash list insertion routine for tcp_t structures.
 */
void
tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
{
	tf_t	*tf;
	tcp_t	**tcpp;
	tcp_t	*tcpnext;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];

	if (tcp->tcp_ptpahn != NULL)
		tcp_acceptor_hash_remove(tcp);
	tcpp = &tf->tf_tcp;
	mutex_enter(&tf->tf_lock);
	tcpnext = tcpp[0];
	if (tcpnext)
		tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
	tcp->tcp_acceptor_hash = tcpnext;
	tcp->tcp_ptpahn = tcpp;
	tcpp[0] = tcp;
	tcp->tcp_acceptor_lockp = &tf->tf_lock;	/* For tcp_*_hash_remove */
	mutex_exit(&tf->tf_lock);
}
/*
 * Hash list removal routine for tcp_t structures.
 */
void
tcp_acceptor_hash_remove(tcp_t *tcp)
{
	tcp_t	*tcpnext;
	kmutex_t *lockp;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
	 */
	lockp = tcp->tcp_acceptor_lockp;

	if (tcp->tcp_ptpahn == NULL)
		return;

	ASSERT(lockp != NULL);
	mutex_enter(lockp);
	if (tcp->tcp_ptpahn) {
		tcpnext = tcp->tcp_acceptor_hash;
		if (tcpnext) {
			tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
			tcp->tcp_acceptor_hash = NULL;
		}
		*tcp->tcp_ptpahn = tcpnext;
		tcp->tcp_ptpahn = NULL;
	}
	mutex_exit(lockp);
	tcp->tcp_acceptor_lockp = NULL;
}
/*
 * Type three generator adapted from the random() function in 4.4 BSD:
 */

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Type 3 -- x**31 + x**3 + 1 */
#define	DEG_3		31
#define	SEP_3		3

/* Protected by tcp_random_lock */
static int tcp_randtbl[DEG_3 + 1];

static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
static int *tcp_random_rptr = &tcp_randtbl[1];

static int *tcp_random_state = &tcp_randtbl[1];
static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];

kmutex_t tcp_random_lock;
void
tcp_random_init(void)
{
	int i;
	hrtime_t hrt;
	time_t wallclock;
	uint64_t result;

	/*
	 * Use high-res timer and current time for seed.  Gethrtime() returns
	 * a longlong, which may contain resolution down to nanoseconds.
	 * The current time will either be a 32-bit or a 64-bit quantity.
	 * XOR the two together in a 64-bit result variable.
	 * Convert the result to a 32-bit value by multiplying the high-order
	 * 32-bits by the low-order 32-bits.
	 */

	hrt = gethrtime();
	(void) drv_getparm(TIME, &wallclock);
	result = (uint64_t)wallclock ^ (uint64_t)hrt;
	mutex_enter(&tcp_random_lock);
	tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
	    (result & 0xffffffff);

	for (i = 1; i < DEG_3; i++)
		tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
		    + 12345;
	tcp_random_fptr = &tcp_random_state[SEP_3];
	tcp_random_rptr = &tcp_random_state[0];
	mutex_exit(&tcp_random_lock);
	for (i = 0; i < 10 * DEG_3; i++)
		(void) tcp_random();
}
/*
 * tcp_random: Return a random number in the range [1 - (128K + 1)].
 * This range is selected to be approximately centered on TCP_ISS / 2,
 * and easy to compute.  We get this value by generating a 32-bit random
 * number, selecting out the high-order 17 bits, and then adding one so
 * that we never return zero.
 */
int
tcp_random(void)
{
	int i;

	mutex_enter(&tcp_random_lock);
	*tcp_random_fptr += *tcp_random_rptr;

	/*
	 * The high-order bits are more random than the low-order bits,
	 * so we select out the high-order 17 bits and add one so that
	 * we never return zero.
	 */
	i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
	if (++tcp_random_fptr >= tcp_random_end_ptr) {
		tcp_random_fptr = tcp_random_state;
		++tcp_random_rptr;
	} else if (++tcp_random_rptr >= tcp_random_end_ptr)
		tcp_random_rptr = tcp_random_state;

	mutex_exit(&tcp_random_lock);
	return (i);
}
/*
 * Split this function out so that if the secret changes, I'm okay.
 *
 * Initialize the tcp_iss_cookie and tcp_iss_key.
 */

#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */

static void
tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
{
	struct {
		int32_t current_time;
		uint32_t randnum;
		uint16_t pad;
		uint8_t ether[6];
		uint8_t passwd[PASSWD_SIZE];
	} tcp_iss_cookie;
	time_t t;

	/*
	 * Start with the current absolute time.
	 */
	(void) drv_getparm(TIME, &t);
	tcp_iss_cookie.current_time = t;

	/*
	 * XXX - Need a more random number per RFC 1750, not this crap.
	 * OTOH, if what follows is pretty random, then I'm in better shape.
	 */
	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
	tcp_iss_cookie.pad = 0x365c;	/* Picked from HMAC pad values. */

	/*
	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
	 * as a good template.
	 */
	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));

	/*
	 * The pass-phrase.  Normally this is supplied by user-called NDD.
	 */
	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));

	/*
	 * See 4010593 if this section becomes a problem again,
	 * but the local ethernet address is useful here.
	 */
	(void) localetheraddr(NULL,
	    (struct ether_addr *)&tcp_iss_cookie.ether);

	/*
	 * Hash 'em all together.  The MD5Final is called per-connection.
	 */
	mutex_enter(&tcps->tcps_iss_key_lock);
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
	    sizeof (tcp_iss_cookie));
	mutex_exit(&tcps->tcps_iss_key_lock);
}
/*
 * Called by IP when IP is loaded into the kernel
 */
void
tcp_ddi_g_init(void)
{
	tcp_timercache = kmem_cache_create("tcp_timercache",
	    sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
	    sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Initialize the random number generator */
	tcp_random_init();

	/* A single callback independently of how many netstacks we have */
	ip_squeue_init(tcp_squeue_add);

	tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);

	tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of tcp_stack_t's.
	 */
	netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
}
#define	INET_NAME	"ip"

/*
 * Initialize the TCP stack instance.
 */
static void *
tcp_stack_init(netstackid_t stackid, netstack_t *ns)
{
	tcp_stack_t	*tcps;
	int		i;
	int		error = 0;
	major_t		major;
	size_t		arrsz;

	tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
	tcps->tcps_netstack = ns;

	/* Initialize locks */
	mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);

	tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
	tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
	tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
	tcps->tcps_min_anonpriv_port = 512;

	tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_BIND_FANOUT_SIZE, KM_SLEEP);
	tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	/* TCP's IPsec code calls the packet dropper. */
	ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");

	arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
	tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
	    KM_SLEEP);
	bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);

	/*
	 * Note: To really walk the device tree you need the devinfo
	 * pointer to your device which is only available after probe/attach.
	 * The following is safe only because it uses ddi_root_node()
	 */
	tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
	    tcp_opt_obj.odb_opt_arr_cnt);

	/*
	 * Initialize RFC 1948 secret values.  This will probably be reset
	 * once by the boot scripts.
	 *
	 * Use NULL name, as the name is caught by the new lockstats.
	 *
	 * Initialize with some random, non-guessable string, like the global
	 * T_INFO_ACK.
	 */
	tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
	    sizeof (tcp_g_t_info_ack), tcps);

	tcps->tcps_kstat = tcp_kstat2_init(stackid);
	tcps->tcps_mibkp = tcp_kstat_init(stackid);

	major = mod_name_to_major(INET_NAME);
	error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
	ASSERT(error == 0);
	tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
	ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
	cv_init(&tcps->tcps_ixa_cleanup_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	tcps->tcps_reclaim = B_FALSE;
	tcps->tcps_reclaim_tid = 0;
	tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;

	/*
	 * ncpus is the current number of CPUs, which can be bigger than
	 * boot_ncpus.  But we don't want to use ncpus to allocate all the
	 * tcp_stats_cpu_t at system boot up time since it will be 1.  While
	 * we handle adding CPU in tcp_cpu_update(), it will be slow if
	 * there are many CPUs as we will be adding them 1 by 1.
	 *
	 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
	 * are not freed until the stack is going away.  So there is no need
	 * to grab a lock to access the per CPU tcps_sc[x] pointer.
	 */
	mutex_enter(&cpu_lock);
	tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
	mutex_exit(&cpu_lock);
	tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
	    KM_SLEEP);
	for (i = 0; i < tcps->tcps_sc_cnt; i++) {
		tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
		    KM_SLEEP);
	}

	mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
	    offsetof(tcp_listener_t, tl_link));

	return (tcps);
}
/*
 * Called when the IP module is about to be unloaded.
 */
void
tcp_ddi_g_destroy(void)
{
	tcp_g_kstat_fini(tcp_g_kstat);
	tcp_g_kstat = NULL;
	bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));

	mutex_destroy(&tcp_random_lock);

	kmem_cache_destroy(tcp_timercache);
	kmem_cache_destroy(tcp_notsack_blk_cache);

	netstack_unregister(NS_TCP);
}
/*
 * Free the TCP stack instance.
 */
static void
tcp_stack_fini(netstackid_t stackid, void *arg)
{
	tcp_stack_t *tcps = (tcp_stack_t *)arg;
	int i;

	freeb(tcps->tcps_ixa_cleanup_mp);
	tcps->tcps_ixa_cleanup_mp = NULL;
	cv_destroy(&tcps->tcps_ixa_cleanup_cv);
	mutex_destroy(&tcps->tcps_ixa_cleanup_lock);

	/*
	 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to
	 * restart the timer.
	 */
	mutex_enter(&tcps->tcps_reclaim_lock);
	tcps->tcps_reclaim = B_FALSE;
	mutex_exit(&tcps->tcps_reclaim_lock);
	if (tcps->tcps_reclaim_tid != 0)
		(void) untimeout(tcps->tcps_reclaim_tid);
	mutex_destroy(&tcps->tcps_reclaim_lock);

	tcp_listener_conf_cleanup(tcps);

	for (i = 0; i < tcps->tcps_sc_cnt; i++)
		kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
	kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));

	kmem_free(tcps->tcps_propinfo_tbl,
	    tcp_propinfo_count * sizeof (mod_prop_info_t));
	tcps->tcps_propinfo_tbl = NULL;

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
	}

	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
	tcps->tcps_bind_fanout = NULL;

	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE);
	tcps->tcps_acceptor_fanout = NULL;

	mutex_destroy(&tcps->tcps_iss_key_lock);
	mutex_destroy(&tcps->tcps_epriv_port_lock);

	ip_drop_unregister(&tcps->tcps_dropper);

	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
	tcps->tcps_kstat = NULL;

	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
	tcps->tcps_mibkp = NULL;

	ldi_ident_release(tcps->tcps_ldi_ident);
	kmem_free(tcps, sizeof (*tcps));
}
/*
 * Generate the ISS, taking into account that NDD changes may happen halfway
 * through.  (If the iss is not zero, set it.)
 */
static void
tcp_iss_init(tcp_t *tcp)
{
	MD5_CTX context;
	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
	uint32_t answer[4];
	tcp_stack_t *tcps = tcp->tcp_tcps;
	conn_t	*connp = tcp->tcp_connp;

	tcps->tcps_iss_incr_extra += (ISS_INCR >> 1);
	tcp->tcp_iss = tcps->tcps_iss_incr_extra;
	switch (tcps->tcps_strong_iss) {
	case 2:
		mutex_enter(&tcps->tcps_iss_key_lock);
		context = tcps->tcps_iss_key;
		mutex_exit(&tcps->tcps_iss_key_lock);
		arg.ports = connp->conn_ports;
		arg.src = connp->conn_laddr_v6;
		arg.dst = connp->conn_faddr_v6;
		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
		MD5Final((uchar_t *)answer, &context);
		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
		/*
		 * Now that we've hashed into a unique per-connection sequence
		 * space, add the random increment used for strong_iss == 1 by
		 * falling through.
		 */
		/* FALLTHRU */
	case 1:
		tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
		break;
	default:
		tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
		break;
	}
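	/*
	 * In summary (illustrative only): with tcps_strong_iss == 2 the ISS
	 * works out to roughly
	 *
	 *	iss_incr_extra + XOR-fold(MD5(key, ports, laddr, faddr))
	 *	    + (gethrtime() >> ISS_NSEC_SHT) + tcp_random()
	 *
	 * i.e. an RFC 1948 style per-connection offset plus a random
	 * increment, whereas the default case only advances with the wall
	 * clock.
	 */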
	tcp->tcp_valid_bits = TCP_ISS_VALID;
	tcp->tcp_fss = tcp->tcp_iss - 1;
	tcp->tcp_suna = tcp->tcp_iss;
	tcp->tcp_snxt = tcp->tcp_iss + 1;
	tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
	tcp->tcp_csuna = tcp->tcp_snxt;
}
/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q.  Note, the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value which, when
 * checked outside the q's lock, is only an advisory check.
 */
void
tcp_setqfull(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_setqfull(connp, &tcp->tcp_flow_stopped);
	if (tcp->tcp_flow_stopped)
		TCP_STAT(tcps, tcp_flwctl_on);
}
void
tcp_clrqfull(tcp_t *tcp)
{
	conn_t	*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_clrqfull(connp, &tcp->tcp_flow_stopped);
}
static int
tcp_squeue_switch(int val)
/*
 * This is called once for each squeue - globally for all stack
 * instances.
 */
void
tcp_squeue_add(squeue_t *sqp)
{
	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
	    sizeof (tcp_squeue_priv_t), KM_SLEEP);

	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
	if (tcp_free_list_max_cnt == 0) {
		int tcp_ncpus = ((boot_max_ncpus == -1) ?
		    max_ncpus : boot_max_ncpus);
		/*
		 * Limit number of entries to 1% of available memory / tcp_ncpus
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
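		/*
		 * Illustrative numbers only (sizeof (tcp_t) assumed to be
		 * roughly 2 KB): with ~8 GB of free memory and 8 CPUs the
		 * cap works out to about 8G / (8 * 2K * 100), i.e. roughly
		 * 5000 cached tcp_t's per squeue free list.
		 */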
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
/*
 * Return the unix error if the TLI error is TSYSERR; otherwise return a
 * negative TLI error.
 */
int
tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
    boolean_t bind_to_req_port_only)
{
	int	error;
	tcp_t	*tcp = connp->conn_tcp;
	if (tcp->tcp_state >= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_bind: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}

	error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
	if (error != 0)
		return (error);

	ASSERT(tcp->tcp_state == TCPS_BOUND);
	tcp->tcp_conn_req_max = 0;
	return (0);
}
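/*
 * Illustrative caller pattern (a sketch, not the only consumer): a TPI
 * wrapper is expected to translate the return value back into a T_ERROR_ACK,
 * roughly as follows:
 *
 *	error = tcp_do_bind(connp, sa, len, cr, bind_to_req_port_only);
 *	if (error > 0)
 *		tcp_err_ack(tcp, mp, TSYSERR, error);	(unix error)
 *	else if (error < 0)
 *		tcp_err_ack(tcp, mp, -error, 0);	(TLI error)
 */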
/*
 * If the return value from this function is positive, it's a UNIX error.
 * Otherwise, if it's negative, then the absolute value is a TLI error.
 * The TPI routine tcp_tpi_connect() is a wrapper function for this.
 */
int
tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
    cred_t *cr, pid_t pid)
{
	tcp_t		*tcp = connp->conn_tcp;
	sin_t		*sin = (sin_t *)sa;
	sin6_t		*sin6 = (sin6_t *)sa;
	ipaddr_t	*dstaddrp;
	in_port_t	dstport;
	uint_t		srcid;
	int		error;
	uint32_t	mss;
	mblk_t		*syn_mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;

	oldstate = tcp->tcp_state;
	switch (len) {
	default:
		/*
		 * Should never happen
		 */
		return (EINVAL);

	case sizeof (sin_t):
		sin = (sin_t *)sa;
		if (sin->sin_port == 0) {
			return (-TBADADDR);
		}
		if (connp->conn_ipv6_v6only) {
			return (EAFNOSUPPORT);
		}
		break;

	case sizeof (sin6_t):
		sin6 = (sin6_t *)sa;
		if (sin6->sin6_port == 0) {
			return (-TBADADDR);
		}
		break;
	}
	/*
	 * If we're connecting to an IPv4-mapped IPv6 address, we need to
	 * make sure that the conn_ipversion is IPV4_VERSION.  We need to do
	 * this before we call tcp_bindi() so that the port lookup code will
	 * look for ports in the correct port space (IPv4 and IPv6 have
	 * separate port spaces).
	 */
	if (connp->conn_family == AF_INET6 &&
	    connp->conn_ipversion == IPV6_VERSION &&
	    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		if (connp->conn_ipv6_v6only)
			return (EADDRNOTAVAIL);

		connp->conn_ipversion = IPV4_VERSION;
	}
	switch (tcp->tcp_state) {
	case TCPS_LISTEN:
		/*
		 * Listening sockets are not allowed to issue connect().
		 */
		if (IPCL_IS_NONSTR(connp))
			return (EOPNOTSUPP);
		/* FALLTHRU */
	case TCPS_IDLE:
		/*
		 * We support quick connect, refer to comments in
		 * tcp_connect_*()
		 */
		/* FALLTHRU */
	case TCPS_BOUND:
		break;
	default:
		return (-TOUTSTATE);
	}
	/*
	 * We update our cred/cpid based on the caller of connect
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in the ixa without any refhold */
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}
	if (connp->conn_family == AF_INET6) {
		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
			    sin6->sin6_port, sin6->sin6_flowinfo,
			    sin6->__sin6_src_id, sin6->sin6_scope_id);
		} else {
			/*
			 * Destination address is a mapped IPv6 address.
			 * Source bound address should be unspecified or
			 * an IPv6 mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &connp->conn_bound_addr_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
				return (EADDRNOTAVAIL);
			}
			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
			dstport = sin6->sin6_port;
			srcid = sin6->__sin6_src_id;
			error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
			    srcid);
		}
	} else {
		dstaddrp = &sin->sin_addr.s_addr;
		dstport = sin->sin_port;
		srcid = 0;
		error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
	}
	if (error != 0)
		goto connect_failed;

	CL_INET_CONNECT(connp, B_TRUE, error);
	if (error != 0)
		goto connect_failed;

	/* connect succeeded */
	TCPS_BUMP_MIB(tcps, tcpActiveOpens);
	tcp->tcp_active_open = 1;
	/*
	 * tcp_set_destination() does not adjust for TCP/IP header length.
	 */
	mss = tcp->tcp_mss - connp->conn_ht_iphc_len;
	/*
	 * Make sure our rwnd is at least tcps_recv_hiwat_minmss * MSS large,
	 * and round the receive buffer up to the nearest MSS.
	 *
	 * We do the round up here because we need to get the interface MTU
	 * first before we can do the round up.
	 */
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
	    tcps->tcps_recv_hiwat_minmss * mss);
	connp->conn_rcvbuf = tcp->tcp_rwnd;
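	/*
	 * Illustrative numbers only (assuming tcps_recv_hiwat_minmss == 4):
	 * with conn_rcvbuf == 128000 and mss == 1460, MSS_ROUNDUP() yields
	 * 128480 (88 * 1460), which dominates the 5840 byte floor, so both
	 * tcp_rwnd and conn_rcvbuf become 128480.
	 */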
	tcp_set_ws_value(tcp);
	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
		tcp->tcp_snd_ws_ok = B_TRUE;
	/*
	 * Set tcp_snd_ts_ok to true so that tcp_xmit_mp will include the
	 * timestamp option in the SYN segment.
	 */
	if (tcps->tcps_tstamp_always ||
	    (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
		tcp->tcp_snd_ts_ok = B_TRUE;
	}
	/*
	 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
	 * the SACK metric is set.  So here we just check the per stack SACK
	 * permitted parameter.
	 */
	if (tcps->tcps_sack_permitted == 2) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_TRUE;
	}
	/*
	 * Should we use ECN?  Note that the current default value of
	 * tcp_ecn_permitted (since SunOS 5.9) is 1.  The reason is that
	 * there is equipment out there that drops ECN-enabled IP packets;
	 * leaving the default at 1 avoids compatibility problems.
	 */
	if (tcps->tcps_ecn_permitted == 2)
		tcp->tcp_ecn_ok = B_TRUE;
	/* Trace change from BOUND -> SYN_SENT here */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, TCPS_BOUND);
	TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
	syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
	    tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (syn_mp != NULL) {
		/*
		 * We must bump the generation before sending the syn
		 * to ensure that we use the right generation in case
		 * this thread issues a "connected" up call.
		 */
		SOCK_CONNID_BUMP(tcp->tcp_connid);
		/*
		 * DTrace sending the first SYN as a
		 * tcp:::connect-request event.
		 */
		DTRACE_TCP5(connect__request, mblk_t *, NULL,
		    ip_xmit_attr_t *, connp->conn_ixa,
		    void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
		    tcph_t *,
		    &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
		tcp_send_data(tcp, syn_mp);
	}
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (0);

connect_failed:
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	tcp->tcp_state = oldstate;
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (error);
}
int
tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
    int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
{
	tcp_t		*tcp = connp->conn_tcp;
	int		error = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;
	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (tcp->tcp_state >= TCPS_BOUND) {
		if ((tcp->tcp_state == TCPS_BOUND ||
		    tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
			/*
			 * Handle listen() increasing backlog.
			 * This is more "liberal" than what the TPI spec
			 * requires but is needed to avoid a t_unbind
			 * when handling listen() since the port number
			 * might be "stolen" between the unbind and bind.
			 */
			goto do_listen;
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_listen: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	} else {
		if (sa == NULL) {
			sin6_t	addr;
			sin_t	*sin;
			sin6_t	*sin6;

			ASSERT(IPCL_IS_NONSTR(connp));
			/* Do an implicit bind: Request for a generic port. */
			if (connp->conn_family == AF_INET) {
				len = sizeof (sin_t);
				sin = (sin_t *)&addr;
				*sin = sin_null;
				sin->sin_family = AF_INET;
			} else {
				ASSERT(connp->conn_family == AF_INET6);
				len = sizeof (sin6_t);
				sin6 = (sin6_t *)&addr;
				*sin6 = sin6_null;
				sin6->sin6_family = AF_INET6;
			}
			sa = (struct sockaddr *)&addr;
		}
		error = tcp_bind_check(connp, sa, len, cr,
		    bind_to_req_port_only);
		if (error)
			return (error);
		/* Fall through and do the fanout insertion */
	}

do_listen:
	ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
	tcp->tcp_conn_req_max = backlog;
	if (tcp->tcp_conn_req_max) {
		if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
		if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;
		/*
		 * If this is a listener, do not reset the eager list
		 * and other state.  Note that we don't check if the
		 * existing eager list meets the new tcp_conn_req_max
		 * requirement.
		 */
		if (tcp->tcp_state != TCPS_LISTEN) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp,
			    void, NULL, int32_t, TCPS_BOUND);
			/* Initialize the chain. Don't need the eager_lock */
			tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
			tcp->tcp_eager_next_drop_q0 = tcp;
			tcp->tcp_eager_prev_drop_q0 = tcp;
			tcp->tcp_second_ctimer_threshold =
			    tcps->tcps_ip_abort_linterval;
		}
	}
	/*
	 * We need to make sure that the conn_recv is set to a non-null
	 * value before we insert the conn into the classifier table.
	 * This is to avoid a race with an incoming packet which does an
	 * ipcl_classify().
	 * We initially set it to tcp_input_listener_unbound to try to
	 * pick a good squeue for the listener when the first SYN arrives.
	 * tcp_input_listener_unbound sets it to tcp_input_listener on that
	 * first SYN.
	 */
	connp->conn_recv = tcp_input_listener_unbound;

	/* Insert the listener in the classifier table */
	error = ip_laddr_fanout_insert(connp);
	if (error != 0) {
		/* Undo the bind - release the port number */
		oldstate = tcp->tcp_state;
		tcp->tcp_state = TCPS_IDLE;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, oldstate);
		connp->conn_bound_addr_v6 = ipv6_all_zeros;

		connp->conn_laddr_v6 = ipv6_all_zeros;
		connp->conn_saddr_v6 = ipv6_all_zeros;
		connp->conn_ports = 0;
		if (connp->conn_anon_port) {
			zone_t	*zone;

			zone = crgetzone(cr);
			connp->conn_anon_port = B_FALSE;
			(void) tsol_mlp_anon(zone, connp->conn_mlp_type,
			    connp->conn_proto, connp->conn_lport, B_FALSE);
		}
		connp->conn_mlp_type = mlptSingle;

		tcp_bind_hash_remove(tcp);
		return (error);
	} else {
		/*
		 * If there is a connection limit, allocate and initialize
		 * the counter struct.  Note that since listen can be called
		 * multiple times, the struct may have been already allocated.
		 */
		if (!list_is_empty(&tcps->tcps_listener_conf) &&
		    tcp->tcp_listen_cnt == NULL) {
			tcp_listen_cnt_t	*tlc;
			uint32_t		ratio;

			ratio = tcp_find_listener_conf(tcps,
			    ntohs(connp->conn_lport));
			if (ratio != 0) {
				uint32_t mem_ratio, tot_buf;

				tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
				    KM_SLEEP);
				/*
				 * Calculate the connection limit based on
				 * the configured ratio and maxusers.  Maxusers
				 * are calculated based on memory size,
				 * ~ 1 user per MB.  Note that the conn_rcvbuf
				 * and conn_sndbuf may change after a
				 * connection is accepted.  So what we have
				 * is only an approximation.
				 */
				if ((tot_buf = connp->conn_rcvbuf +
				    connp->conn_sndbuf) < MB) {
					mem_ratio = MB / tot_buf;
					tlc->tlc_max = maxusers / ratio *
					    mem_ratio;
				} else {
					mem_ratio = tot_buf / MB;
					tlc->tlc_max = maxusers / ratio /
					    mem_ratio;
				}
				/* At least we should allow two connections! */
				if (tlc->tlc_max <= tcp_min_conn_listener)
					tlc->tlc_max = tcp_min_conn_listener;
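				/*
				 * Illustrative numbers only: with
				 * maxusers == 2048 (~2 GB of memory),
				 * ratio == 2 and tot_buf == 256 KB,
				 * mem_ratio == 4 and tlc_max ==
				 * 2048 / 2 * 4 == 4096 connections
				 * for this listener.
				 */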
				tlc->tlc_cnt = 1;
				tlc->tlc_drop = 0;
				tcp->tcp_listen_cnt = tlc;