/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 */

/* Copyright (c) 1990 Mentat Inc. */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/zone.h>
#include <sys/sunldi.h>

#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/uio.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/proto_set.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/udp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipdrop.h>

#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_rts.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>
#include <sys/callo.h>
/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in tcp_t and conn_t structure
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t. The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time
 * the eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPSEC checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
 * or SQ_NODRAIN). Since the squeues allow a direct function call, caller
 * can pass any tcp function having prototype of edesc_t as argument
 * (different from traditional STREAMs model where packets come in only
 * designated entry points). The list of functions that can be directly
 * called via squeue are listed before the usual function prototype.
 *
 * Referencing :
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at tcp structure after tcp_close_output() returns which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on write side or by doing a classify on read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. For
 * read side, the classifier itself puts the reference under fanout lock
 * to make sure that tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on application CPU which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * picked up by ACKs.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_input_listener(). But briefly, the squeue is picked by
 * ip_fanout based on the ring or the sender (if loopback).
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
 * reference but tcp_close() always drops IP's reference. So if the tcp was
 * not killed, it is sitting in the time_wait list with 2 references - 1 for TCP
 * and 1 because it is in the classifier's connected hash. This is the condition
 * we use to determine that it's OK to clean up the tcp outside of squeue
 * when time wait expires (check the ref under fanout and conn_lock and
 * if it is 2, remove it from fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
 * tcp_detached and the STREAM can safely go away.
 *
 * Special provisions and fast paths:
 *
 * We make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in acceptor hash since a sockfs listener can never
 * become acceptor and also avoid allocating a tcp_t for acceptor STREAM
 * since eager has already been allocated and the accept now happens
 * on acceptor STREAM. There is a big blob of comment on top of
 * tcp_input_listener explaining the new accept. When socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to old
 * behaviour. Once tcp_issocket is unset, it's never set for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPSEC policies for new connection. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 */
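
/*
 * Illustrative sketch (not part of the original source): the reference and
 * squeue discipline described above, as a caller would follow it.  The helper
 * name and the squeue tag below are made up for the example; the real
 * read-side path is ip_fanout classifying the packet and then entering the
 * squeue with one of the edesc_t functions listed later in this file
 * (e.g. tcp_input_data).
 */
#if 0
static void
tcp_example_deliver(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira)
{
	/*
	 * Take a reference on the conn_t before entering the squeue; the
	 * squeue drops it once the callback has run, so the called function
	 * never needs an explicit CONN_DEC_REF.
	 */
	CONN_INC_REF(connp);
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_input_data, connp,
	    ira, SQ_FILL, SQTAG_TCP_EXAMPLE /* illustrative tag */);
}
#endif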
/*
 * Values for squeue switch:
 * 1: SQ_NODRAIN
 * 2: SQ_PROCESS
 * 3: SQ_FILL
 */
int tcp_squeue_wput = 2;	/* /etc/system */
int tcp_squeue_flag;

/*
 * To prevent memory hog, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	128000

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Size of acceptor hash list. It has to be a power of 2 for hashing.
 */
#define	TCP_ACCEPTOR_FANOUT_SIZE	512

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#endif	/* _ILP32 */
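
/*
 * Worked example (illustrative only): with TCP_ACCEPTOR_FANOUT_SIZE at 512
 * the hash keeps the low 9 bits of the acceptor id (after discarding the low
 * 8 bits on ILP32).  An id of 0x12345 therefore lands in bucket 0x145 on
 * LP64 and in bucket 0x123 on ILP32.
 */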
/*
 * Minimum number of connections which can be created per listener. Used
 * when the listener connection count is in effect.
 */
static uint32_t tcp_min_conn_listener = 2;

uint32_t tcp_early_abort = 30;

/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
void		tcp_input_data(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy);
/* Prototype for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static int	tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static int	tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo,
		    uint_t srcid, uint32_t scope_id);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);

static void	tcp_wsrv(queue_t *q);
static void	tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
static void	tcp_update_zcopy(tcp_t *tcp);
static void	tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
    ixa_notify_arg_t);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);

static int	tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);

static void	tcp_squeue_add(squeue_t *);
struct module_info tcp_rinfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* TCP entry point during fallback */
struct qinit tcp_fallback_sock_winit = {
	(pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};
/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
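
/*
 * For example (illustrative only), ROUNDUP32(13) and ROUNDUP32(16) both
 * evaluate to 16: the macro adds sizeof (int32_t) - 1 and then masks off
 * the low two bits, so values already on a 32 bit boundary are unchanged.
 */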
/* Template for response to info request. */
struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

/*
 * TCP tunables related declarations. Definitions are in tcp_tunables.c
 */
extern mod_prop_info_t tcp_propinfo_tbl[];
extern int tcp_propinfo_count;

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD. Otherwise, the per-connection behavior is
 * determined dynamically during tcp_set_destination(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;
/*
 * If the receive buffer size is changed, this function is called to update
 * the upper socket layer on the new delayed receive wake up threshold.
 */
void
tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
{
	uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;

	if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
		conn_t *connp = tcp->tcp_connp;
		struct sock_proto_props sopp;

		/*
		 * only increase rcvthresh upto default_threshold
		 */
		if (new_rcvthresh > default_threshold)
			new_rcvthresh = default_threshold;

		sopp.sopp_flags = SOCKOPT_RCVTHRESH;
		sopp.sopp_rcvthresh = new_rcvthresh;

		(*connp->conn_upcalls->su_set_proto_props)
		    (connp->conn_upper_handle, &sopp);
	}
}
/*
 * Figure out the value of the window scale option. Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
 */
void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}
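
/*
 * Worked example (illustrative only): with TCP_MAXWIN at 65535 and a receive
 * window of 1 MB (1048576 bytes, already rounded to the MSS), the loop above
 * stops at i = 5, since 1048576 >> 4 is still 65536 while 1048576 >> 5 is
 * 32768; a shift of 5 is the smallest that fits this window into the 16 bit
 * window field.
 */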
/*
 * Remove cached/latched IPsec references.
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(connp->conn_flags & IPCL_TCPCONN);

	if (connp->conn_latch != NULL) {
		IPLATCH_REFRELE(connp->conn_latch);
		connp->conn_latch = NULL;
	}
	if (connp->conn_latch_in_policy != NULL) {
		IPPOL_REFRELE(connp->conn_latch_in_policy);
		connp->conn_latch_in_policy = NULL;
	}
	if (connp->conn_latch_in_action != NULL) {
		IPACT_REFRELE(connp->conn_latch_in_action);
		connp->conn_latch_in_action = NULL;
	}
	if (connp->conn_policy != NULL) {
		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
		connp->conn_policy = NULL;
	}
}
/*
 * Cleanup before placing on free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	mblk_t		*tcp_rsrv_mp;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);
	ixa_cleanup(connp->conn_ixa);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	tcp_free(tcp);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in global hash list. We
	 * know the walkers can't get to this conn because we
	 * had set CONDEMNED flag earlier and checked reference
	 * under conn_lock so walker won't pick it and when we
	 * go the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_rsrv_mp = tcp->tcp_rsrv_mp;

	if (connp->conn_cred != NULL) {
		crfree(connp->conn_cred);
		connp->conn_cred = NULL;
	}
	ipcl_conn_cleanup(connp);
	connp->conn_flags = IPCL_TCPCONN;

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack
	 * in which case it will cause the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	connp->conn_ixa->ixa_ipst = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;

	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_rsrv_mp = tcp_rsrv_mp;

	tcp->tcp_connp = connp;

	ASSERT(connp->conn_tcp == tcp);
	ASSERT(connp->conn_flags & IPCL_TCPCONN);
	connp->conn_state_flags = CONN_INCIPIENT;
	ASSERT(connp->conn_proto == IPPROTO_TCP);
	ASSERT(connp->conn_ref == 1);
}
/*
 * Adapt to the information, such as rtt and rtt_sd, provided from the
 * DCE and IRE maintained by IP.
 *
 * Checks for multicast and broadcast destination address.
 * Returns zero if ok; an errno on failure.
 *
 * Note that the MSS calculation here is based on the info given in
 * the DCE and IRE. We do not do any calculation based on TCP options. They
 * will be handled in tcp_input_data() when TCP knows which options to use.
 *
 * Note on how TCP gets its parameters for a connection.
 *
 * When a tcp_t structure is allocated, it gets all the default parameters.
 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
 * spipe, rpipe, ... from the route metrics. Route metric overrides the
 * default.
 *
 * An incoming SYN with a multicast or broadcast destination address is dropped
 * in ip_fanout_v4/v6.
 *
 * An incoming SYN with a multicast or broadcast source address is always
 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
 * conn_connect.
 * The same logic in tcp_set_destination also serves to
 * reject an attempt to connect to a broadcast or multicast (destination)
 * address.
 */
int
tcp_set_destination(tcp_t *tcp)
{
	uint32_t	mss;
	uint32_t	flags;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;
	uint32_t	mss_max;
	iulp_t		uinfo;

	flags = IPDF_LSO | IPDF_ZCOPY;
	/*
	 * Make sure we have a dce for the destination to avoid dce_ident
	 * contention for connected sockets.
	 */
	flags |= IPDF_UNIQUE_DCE;

	if (!tcps->tcps_ignore_path_mtu)
		connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;

	/* Use conn_lock to satisfy ASSERT; tcp is already serialized */
	mutex_enter(&connp->conn_lock);
	error = conn_connect(connp, &uinfo, flags);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	error = tcp_build_hdrs(tcp);
	if (error != 0)
		return (error);

	tcp->tcp_localnet = uinfo.iulp_localnet;

	if (uinfo.iulp_rtt != 0) {
		clock_t	rto;

		tcp->tcp_rtt_sa = uinfo.iulp_rtt;
		tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
		rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
		    tcps->tcps_rexmit_interval_extra +
		    (tcp->tcp_rtt_sa >> 5);

		TCP_SET_RTO(tcp, rto);
	}
	if (uinfo.iulp_ssthresh != 0)
		tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
	else
		tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	if (uinfo.iulp_spipe > 0) {
		connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
		    tcps->tcps_max_buf);
		if (tcps->tcps_snd_lowat_fraction != 0) {
			connp->conn_sndlowat = connp->conn_sndbuf /
			    tcps->tcps_snd_lowat_fraction;
		}
		(void) tcp_maxpsz_set(tcp, B_TRUE);
	}
	/*
	 * Note that up till now, acceptor always inherits receive
	 * window from the listener. But if there is a metrics
	 * associated with a host, we should use that instead of
	 * inheriting it from listener. Thus we need to pass this
	 * info back to the caller.
	 */
	if (uinfo.iulp_rpipe > 0) {
		tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
		    tcps->tcps_max_buf);
	}

	if (uinfo.iulp_rtomax > 0) {
		tcp->tcp_second_timer_threshold =
		    uinfo.iulp_rtomax;
	}

	/*
	 * Use the metric option settings, iulp_tstamp_ok and
	 * iulp_wscale_ok, only for active open. What this means
	 * is that if the other side uses timestamp or window
	 * scale option, TCP will also use those options. That
	 * is for passive open. If the application sets a
	 * large window, window scale is enabled regardless of
	 * the value in iulp_wscale_ok. This is the behavior
	 * since 2.6. So we keep it.
	 * The only case left in passive open processing is the
	 * check for SACK.
	 * For ECN, it should probably be like SACK. But the
	 * current value is binary, so we treat it like the other
	 * cases. The metric only controls active open. For passive
	 * open, the ndd param, tcp_ecn_permitted, controls the
	 * permission.
	 */
	if (!tcp_detached) {
		/*
		 * The if check means that the following can only
		 * be turned on by the metrics only IRE, but not off.
		 */
		if (uinfo.iulp_tstamp_ok)
			tcp->tcp_snd_ts_ok = B_TRUE;
		if (uinfo.iulp_wscale_ok)
			tcp->tcp_snd_ws_ok = B_TRUE;
		if (uinfo.iulp_sack == 2)
			tcp->tcp_snd_sack_ok = B_TRUE;
		if (uinfo.iulp_ecn_ok)
			tcp->tcp_ecn_ok = B_TRUE;
	} else {
		/*
		 * Passive open.
		 *
		 * As above, the if check means that SACK can only be
		 * turned on by the metric only IRE.
		 */
		if (uinfo.iulp_sack > 0) {
			tcp->tcp_snd_sack_ok = B_TRUE;
		}
	}

	/*
	 * XXX Note that currently, iulp_mtu can be as small as 68
	 * because of PMTUd. So tcp_mss may go to negative if combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(uinfo.iulp_mtu != 0);
	mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;

	/* Sanity check for MSS value. */
	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Update the tcp connection with LSO capability.
	 */
	tcp_update_lso(tcp, connp->conn_ixa);

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
	 */
	tcp_iss_init(tcp);

	tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);

	/*
	 * Make sure that conn is not marked incipient
	 * for incoming connections. A blind
	 * removal of incipient flag is cheaper than
	 * check and removal.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);
	return (0);
}

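/*
 * Illustrative sketch (not part of the original source): the RTO seeding
 * done from the cached metrics above, pulled out as a hypothetical helper.
 * With iulp_rtt = 800, iulp_rtt_sd = 120 and tcps_rexmit_interval_extra = 0
 * this evaluates to (800 >> 3) + 120 + 0 + (800 >> 5) = 245, which
 * TCP_SET_RTO() then clamps to the stack's configured bounds.
 */
#if 0
static clock_t
tcp_example_seed_rto(const tcp_t *tcp, const tcp_stack_t *tcps)
{
	return ((tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5));
}
#endif
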
791 * tcp_clean_death / tcp_close_detached must not be called more than once
792 * on a tcp. Thus every function that potentially calls tcp_clean_death
793 * must check for the tcp state before calling tcp_clean_death.
794 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
795 * tcp_timer_handler, all check for the tcp state.
799 tcp_clean_death_wrapper(void *arg
, mblk_t
*mp
, void *arg2
,
800 ip_recv_attr_t
*dummy
)
802 tcp_t
*tcp
= ((conn_t
*)arg
)->conn_tcp
;
805 if (tcp
->tcp_state
> TCPS_BOUND
)
806 (void) tcp_clean_death(((conn_t
*)arg
)->conn_tcp
, ETIMEDOUT
);
810 * We are dying for some reason. Try to do it gracefully. (May be called
813 * Return -1 if the structure was not cleaned up (if the cleanup had to be
814 * done by a service procedure).
815 * TBD - Should the return value distinguish between the tcp_t being
816 * freed and it being reinitialized?
819 tcp_clean_death(tcp_t
*tcp
, int err
)
823 conn_t
*connp
= tcp
->tcp_connp
;
824 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
829 if (tcp
->tcp_linger_tid
!= 0 &&
830 TCP_TIMER_CANCEL(tcp
, tcp
->tcp_linger_tid
) >= 0) {
831 tcp_stop_lingering(tcp
);
835 ASSERT((connp
->conn_family
== AF_INET
&&
836 connp
->conn_ipversion
== IPV4_VERSION
) ||
837 (connp
->conn_family
== AF_INET6
&&
838 (connp
->conn_ipversion
== IPV4_VERSION
||
839 connp
->conn_ipversion
== IPV6_VERSION
)));
841 if (TCP_IS_DETACHED(tcp
)) {
842 if (tcp
->tcp_hard_binding
) {
844 * Its an eager that we are dealing with. We close the
845 * eager but in case a conn_ind has already gone to the
846 * listener, let tcp_accept_finish() send a discon_ind
847 * to the listener and drop the last reference. If the
848 * listener doesn't even know about the eager i.e. the
849 * conn_ind hasn't gone up, blow away the eager and drop
850 * the last reference as well. If the conn_ind has gone
851 * up, state should be BOUND. tcp_accept_finish
852 * will figure out that the connection has received a
853 * RST and will send a DISCON_IND to the application.
855 tcp_closei_local(tcp
);
856 if (!tcp
->tcp_tconnind_started
) {
859 tcp
->tcp_state
= TCPS_BOUND
;
860 DTRACE_TCP6(state__change
, void, NULL
,
861 ip_xmit_attr_t
*, connp
->conn_ixa
,
862 void, NULL
, tcp_t
*, tcp
, void, NULL
,
863 int32_t, TCPS_CLOSED
);
866 tcp_close_detached(tcp
);
871 TCP_STAT(tcps
, tcp_clean_death_nondetached
);
874 * The connection is dead. Decrement listener connection counter if
877 if (tcp
->tcp_listen_cnt
!= NULL
)
878 TCP_DECR_LISTEN_CNT(tcp
);
881 * When a connection is moved to TIME_WAIT state, the connection
882 * counter is already decremented. So no need to decrement here
883 * again. See SET_TIME_WAIT() macro.
885 if (tcp
->tcp_state
>= TCPS_ESTABLISHED
&&
886 tcp
->tcp_state
< TCPS_TIME_WAIT
) {
892 /* Trash all inbound data */
893 if (!IPCL_IS_NONSTR(connp
)) {
899 * If we are at least part way open and there is error
900 * (err==0 implies no error)
901 * notify our client by a T_DISCON_IND.
903 if ((tcp
->tcp_state
>= TCPS_SYN_SENT
) && err
) {
904 if (tcp
->tcp_state
>= TCPS_ESTABLISHED
&&
905 !TCP_IS_SOCKET(tcp
)) {
907 * Send M_FLUSH according to TPI. Because sockets will
908 * (and must) ignore FLUSHR we do that only for TPI
909 * endpoints and sockets in STREAMS mode.
911 (void) putnextctl1(q
, M_FLUSH
, FLUSHR
);
913 if (connp
->conn_debug
) {
914 (void) strlog(TCP_MOD_ID
, 0, 1, SL_TRACE
|SL_ERROR
,
915 "tcp_clean_death: discon err %d", err
);
917 if (IPCL_IS_NONSTR(connp
)) {
918 /* Direct socket, use upcall */
919 (*connp
->conn_upcalls
->su_disconnected
)(
920 connp
->conn_upper_handle
, tcp
->tcp_connid
, err
);
922 mp
= mi_tpi_discon_ind(NULL
, err
, 0);
926 if (connp
->conn_debug
) {
927 (void) strlog(TCP_MOD_ID
, 0, 1,
929 "tcp_clean_death, sending M_ERROR");
931 (void) putnextctl1(q
, M_ERROR
, EPROTO
);
934 if (tcp
->tcp_state
<= TCPS_SYN_RCVD
) {
935 /* SYN_SENT or SYN_RCVD */
936 TCPS_BUMP_MIB(tcps
, tcpAttemptFails
);
937 } else if (tcp
->tcp_state
<= TCPS_CLOSE_WAIT
) {
938 /* ESTABLISHED or CLOSE_WAIT */
939 TCPS_BUMP_MIB(tcps
, tcpEstabResets
);
944 * ESTABLISHED non-STREAMS eagers are not 'detached' because
945 * an upper handle is obtained when the SYN-ACK comes in. So it
946 * should receive the 'disconnected' upcall, but tcp_reinit should
947 * not be called since this is an eager.
949 if (tcp
->tcp_listener
!= NULL
&& IPCL_IS_NONSTR(connp
)) {
950 tcp_closei_local(tcp
);
951 tcp
->tcp_state
= TCPS_BOUND
;
952 DTRACE_TCP6(state__change
, void, NULL
, ip_xmit_attr_t
*,
953 connp
->conn_ixa
, void, NULL
, tcp_t
*, tcp
, void, NULL
,
954 int32_t, TCPS_CLOSED
);
959 if (IPCL_IS_NONSTR(connp
))
960 (void) tcp_do_unbind(connp
);
966 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
967 * to expire, stop the wait and finish the close.
970 tcp_stop_lingering(tcp_t
*tcp
)
973 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
974 conn_t
*connp
= tcp
->tcp_connp
;
976 tcp
->tcp_linger_tid
= 0;
977 if (tcp
->tcp_state
> TCPS_LISTEN
) {
978 tcp_acceptor_hash_remove(tcp
);
979 mutex_enter(&tcp
->tcp_non_sq_lock
);
980 if (tcp
->tcp_flow_stopped
) {
983 mutex_exit(&tcp
->tcp_non_sq_lock
);
985 if (tcp
->tcp_timer_tid
!= 0) {
986 delta
= TCP_TIMER_CANCEL(tcp
, tcp
->tcp_timer_tid
);
987 tcp
->tcp_timer_tid
= 0;
990 * Need to cancel those timers which will not be used when
991 * TCP is detached. This has to be done before the conn_wq
994 tcp_timers_stop(tcp
);
996 tcp
->tcp_detached
= B_TRUE
;
997 connp
->conn_rq
= NULL
;
998 connp
->conn_wq
= NULL
;
1000 if (tcp
->tcp_state
== TCPS_TIME_WAIT
) {
1001 tcp_time_wait_append(tcp
);
1002 TCP_DBGSTAT(tcps
, tcp_detach_time_wait
);
1007 * If delta is zero the timer event wasn't executed and was
1008 * successfully canceled. In this case we need to restart it
1009 * with the minimal delta possible.
1012 tcp
->tcp_timer_tid
= TCP_TIMER(tcp
, tcp_timer
,
1016 tcp_closei_local(tcp
);
1017 CONN_DEC_REF(connp
);
1020 tcp
->tcp_detached
= B_TRUE
;
1021 connp
->conn_rq
= NULL
;
1022 connp
->conn_wq
= NULL
;
1024 /* Signal closing thread that it can complete close */
1025 mutex_enter(&tcp
->tcp_closelock
);
1026 tcp
->tcp_closed
= 1;
1027 cv_signal(&tcp
->tcp_closecv
);
1028 mutex_exit(&tcp
->tcp_closelock
);
1030 /* If we have an upper handle (socket), release it */
1031 if (IPCL_IS_NONSTR(connp
)) {
1032 ASSERT(connp
->conn_upper_handle
!= NULL
);
1033 (*connp
->conn_upcalls
->su_closed
)(connp
->conn_upper_handle
);
1034 connp
->conn_upper_handle
= NULL
;
1035 connp
->conn_upcalls
= NULL
;
1040 tcp_close_common(conn_t
*connp
, int flags
)
1042 tcp_t
*tcp
= connp
->conn_tcp
;
1043 mblk_t
*mp
= &tcp
->tcp_closemp
;
1044 boolean_t conn_ioctl_cleanup_reqd
= B_FALSE
;
1047 ASSERT(connp
->conn_ref
>= 2);
1050 * Mark the conn as closing. ipsq_pending_mp_add will not
1051 * add any mp to the pending mp list, after this conn has
1054 mutex_enter(&connp
->conn_lock
);
1055 connp
->conn_state_flags
|= CONN_CLOSING
;
1056 if (connp
->conn_oper_pending_ill
!= NULL
)
1057 conn_ioctl_cleanup_reqd
= B_TRUE
;
1058 CONN_INC_REF_LOCKED(connp
);
1059 mutex_exit(&connp
->conn_lock
);
1060 tcp
->tcp_closeflags
= (uint8_t)flags
;
1061 ASSERT(connp
->conn_ref
>= 3);
1064 * tcp_closemp_used is used below without any protection of a lock
1065 * as we don't expect any one else to use it concurrently at this
1066 * point otherwise it would be a major defect.
1069 if (mp
->b_prev
== NULL
)
1070 tcp
->tcp_closemp_used
= B_TRUE
;
1072 cmn_err(CE_PANIC
, "tcp_close: concurrent use of tcp_closemp: "
1073 "connp %p tcp %p\n", (void *)connp
, (void *)tcp
);
1075 TCP_DEBUG_GETPCSTACK(tcp
->tcmp_stk
, 15);
1078 * Cleanup any queued ioctls here. This must be done before the wq/rq
1079 * are re-written by tcp_close_output().
1081 if (conn_ioctl_cleanup_reqd
)
1082 conn_ioctl_cleanup(connp
);
1085 * As CONN_CLOSING is set, no further ioctls should be passed down to
1086 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
1087 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
1088 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
1089 * was still in flight at the time, we wait for it here. See comments
1090 * for CONN_INC_IOCTLREF in ip.h for details.
1092 mutex_enter(&connp
->conn_lock
);
1093 while (connp
->conn_ioctlref
> 0)
1094 cv_wait(&connp
->conn_cv
, &connp
->conn_lock
);
1095 ASSERT(connp
->conn_ioctlref
== 0);
1096 ASSERT(connp
->conn_oper_pending_ill
== NULL
);
1097 mutex_exit(&connp
->conn_lock
);
1099 SQUEUE_ENTER_ONE(connp
->conn_sqp
, mp
, tcp_close_output
, connp
,
1100 NULL
, tcp_squeue_flag
, SQTAG_IP_TCP_CLOSE
);
1103 * For non-STREAMS sockets, the normal case is that the conn makes
1104 * an upcall when it's finally closed, so there is no need to wait
1105 * in the protocol. But in case of SO_LINGER the thread sleeps here
1106 * so it can properly deal with the thread being interrupted.
1108 if (IPCL_IS_NONSTR(connp
) && connp
->conn_linger
== 0)
1111 mutex_enter(&tcp
->tcp_closelock
);
1112 while (!tcp
->tcp_closed
) {
1113 if (!cv_wait_sig(&tcp
->tcp_closecv
, &tcp
->tcp_closelock
)) {
1115 * The cv_wait_sig() was interrupted. We now do the
1118 * 1) If the endpoint was lingering, we allow this
1119 * to be interrupted by cancelling the linger timeout
1120 * and closing normally.
1122 * 2) Revert to calling cv_wait()
1124 * We revert to using cv_wait() to avoid an
1125 * infinite loop which can occur if the calling
1126 * thread is higher priority than the squeue worker
1127 * thread and is bound to the same cpu.
1129 if (connp
->conn_linger
&& connp
->conn_lingertime
> 0) {
1130 mutex_exit(&tcp
->tcp_closelock
);
1131 /* Entering squeue, bump ref count. */
1132 CONN_INC_REF(connp
);
1133 bp
= allocb_wait(0, BPRI_HI
, STR_NOSIG
, NULL
);
1134 SQUEUE_ENTER_ONE(connp
->conn_sqp
, bp
,
1135 tcp_linger_interrupted
, connp
, NULL
,
1136 tcp_squeue_flag
, SQTAG_IP_TCP_CLOSE
);
1137 mutex_enter(&tcp
->tcp_closelock
);
1142 while (!tcp
->tcp_closed
)
1143 cv_wait(&tcp
->tcp_closecv
, &tcp
->tcp_closelock
);
1144 mutex_exit(&tcp
->tcp_closelock
);
1147 * In the case of listener streams that have eagers in the q or q0
1148 * we wait for the eagers to drop their reference to us. conn_rq and
1149 * conn_wq of the eagers point to our queues. By waiting for the
1150 * refcnt to drop to 1, we are sure that the eagers have cleaned
1151 * up their queue pointers and also dropped their references to us.
1153 * For non-STREAMS sockets we do not have to wait here; the
1154 * listener will instead make a su_closed upcall when the last
1155 * reference is dropped.
1157 if (tcp
->tcp_wait_for_eagers
&& !IPCL_IS_NONSTR(connp
)) {
1158 mutex_enter(&connp
->conn_lock
);
1159 while (connp
->conn_ref
!= 1) {
1160 cv_wait(&connp
->conn_cv
, &connp
->conn_lock
);
1162 mutex_exit(&connp
->conn_lock
);
1166 connp
->conn_cpid
= NOPID
;
1170 * Called by tcp_close() routine via squeue when lingering is
1171 * interrupted by a signal.
1176 tcp_linger_interrupted(void *arg
, mblk_t
*mp
, void *arg2
, ip_recv_attr_t
*dummy
)
1178 conn_t
*connp
= (conn_t
*)arg
;
1179 tcp_t
*tcp
= connp
->conn_tcp
;
1182 if (tcp
->tcp_linger_tid
!= 0 &&
1183 TCP_TIMER_CANCEL(tcp
, tcp
->tcp_linger_tid
) >= 0) {
1184 tcp_stop_lingering(tcp
);
1185 tcp
->tcp_client_errno
= EINTR
;
1190 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
1191 * Some stream heads get upset if they see these later on as anything but NULL.
1194 tcp_close_mpp(mblk_t
**mpp
)
1198 if ((mp
= *mpp
) != NULL
) {
1202 } while ((mp
= mp
->b_cont
) != NULL
);
1210 /* Do detached close. */
1212 tcp_close_detached(tcp_t
*tcp
)
1218 * Clustering code serializes TCP disconnect callbacks and
1219 * cluster tcp list walks by blocking a TCP disconnect callback
1220 * if a cluster tcp list walk is in progress. This ensures
1221 * accurate accounting of TCPs in the cluster code even though
1222 * the TCP list walk itself is not atomic.
1224 tcp_closei_local(tcp
);
1225 CONN_DEC_REF(tcp
->tcp_connp
);
1229 * The tcp_t is going away. Remove it from all lists and set it
1230 * to TCPS_CLOSED. The freeing up of memory is deferred until
1231 * tcp_inactive. This is needed since a thread in tcp_rput might have
1232 * done a CONN_INC_REF on this structure before it was removed from the
1236 tcp_closei_local(tcp_t
*tcp
)
1238 conn_t
*connp
= tcp
->tcp_connp
;
1239 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
1242 if (!TCP_IS_SOCKET(tcp
))
1243 tcp_acceptor_hash_remove(tcp
);
1245 TCPS_UPDATE_MIB(tcps
, tcpHCInSegs
, tcp
->tcp_ibsegs
);
1246 tcp
->tcp_ibsegs
= 0;
1247 TCPS_UPDATE_MIB(tcps
, tcpHCOutSegs
, tcp
->tcp_obsegs
);
1248 tcp
->tcp_obsegs
= 0;
1251 * This can be called via tcp_time_wait_processing() if TCP gets a
1252 * SYN with sequence number outside the TIME-WAIT connection's
1253 * window. So we need to check for TIME-WAIT state here as the
1254 * connection counter is already decremented. See SET_TIME_WAIT()
1257 if (tcp
->tcp_state
>= TCPS_ESTABLISHED
&&
1258 tcp
->tcp_state
< TCPS_TIME_WAIT
) {
1259 TCPS_CONN_DEC(tcps
);
1263 * If we are an eager connection hanging off a listener that
1264 * hasn't formally accepted the connection yet, get off his
1265 * list and blow off any data that we have accumulated.
1267 if (tcp
->tcp_listener
!= NULL
) {
1268 tcp_t
*listener
= tcp
->tcp_listener
;
1269 mutex_enter(&listener
->tcp_eager_lock
);
1271 * tcp_tconnind_started == B_TRUE means that the
1272 * conn_ind has already gone to listener. At
1273 * this point, eager will be closed but we
1274 * leave it in listeners eager list so that
1275 * if listener decides to close without doing
1276 * accept, we can clean this up. In tcp_tli_accept
1277 * we take care of the case of accept on closed
1280 if (!tcp
->tcp_tconnind_started
) {
1281 tcp_eager_unlink(tcp
);
1282 mutex_exit(&listener
->tcp_eager_lock
);
1284 * We don't want to have any pointers to the
1285 * listener queue, after we have released our
1286 * reference on the listener
1288 ASSERT(tcp
->tcp_detached
);
1289 connp
->conn_rq
= NULL
;
1290 connp
->conn_wq
= NULL
;
1291 CONN_DEC_REF(listener
->tcp_connp
);
1293 mutex_exit(&listener
->tcp_eager_lock
);
1297 /* Stop all the timers */
1298 tcp_timers_stop(tcp
);
1300 if (tcp
->tcp_state
== TCPS_LISTEN
) {
1301 if (tcp
->tcp_ip_addr_cache
) {
1302 kmem_free((void *)tcp
->tcp_ip_addr_cache
,
1303 IP_ADDR_CACHE_SIZE
* sizeof (ipaddr_t
));
1304 tcp
->tcp_ip_addr_cache
= NULL
;
1308 /* Decrement listerner connection counter if necessary. */
1309 if (tcp
->tcp_listen_cnt
!= NULL
)
1310 TCP_DECR_LISTEN_CNT(tcp
);
1312 mutex_enter(&tcp
->tcp_non_sq_lock
);
1313 if (tcp
->tcp_flow_stopped
)
1315 mutex_exit(&tcp
->tcp_non_sq_lock
);
1317 tcp_bind_hash_remove(tcp
);
1319 * If the tcp_time_wait_collector (which runs outside the squeue)
1320 * is trying to remove this tcp from the time wait list, we will
1321 * block in tcp_time_wait_remove while trying to acquire the
1322 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
1323 * requires the ipcl_hash_remove to be ordered after the
1324 * tcp_time_wait_remove for the refcnt checks to work correctly.
1326 if (tcp
->tcp_state
== TCPS_TIME_WAIT
)
1327 (void) tcp_time_wait_remove(tcp
, NULL
);
1328 CL_INET_DISCONNECT(connp
);
1329 ipcl_hash_remove(connp
);
1330 oldstate
= tcp
->tcp_state
;
1331 tcp
->tcp_state
= TCPS_CLOSED
;
1332 /* Need to probe before ixa_cleanup() is called */
1333 DTRACE_TCP6(state__change
, void, NULL
, ip_xmit_attr_t
*,
1334 connp
->conn_ixa
, void, NULL
, tcp_t
*, tcp
, void, NULL
,
1336 ixa_cleanup(connp
->conn_ixa
);
1339 * Mark the conn as CONDEMNED
1341 mutex_enter(&connp
->conn_lock
);
1342 connp
->conn_state_flags
|= CONN_CONDEMNED
;
1343 mutex_exit(&connp
->conn_lock
);
1345 ASSERT(tcp
->tcp_time_wait_next
== NULL
);
1346 ASSERT(tcp
->tcp_time_wait_prev
== NULL
);
1347 ASSERT(tcp
->tcp_time_wait_expire
== 0);
1349 tcp_ipsec_cleanup(tcp
);
1353 * tcp is dying (called from ipcl_conn_destroy and error cases).
1354 * Free the tcp_t in either case.
1357 tcp_free(tcp_t
*tcp
)
1360 conn_t
*connp
= tcp
->tcp_connp
;
1362 ASSERT(tcp
!= NULL
);
1363 ASSERT(tcp
->tcp_ptpahn
== NULL
&& tcp
->tcp_acceptor_hash
== NULL
);
1365 connp
->conn_rq
= NULL
;
1366 connp
->conn_wq
= NULL
;
1368 tcp_close_mpp(&tcp
->tcp_xmit_head
);
1369 tcp_close_mpp(&tcp
->tcp_reass_head
);
1370 if (tcp
->tcp_rcv_list
!= NULL
) {
1371 /* Free b_next chain */
1372 tcp_close_mpp(&tcp
->tcp_rcv_list
);
1374 if ((mp
= tcp
->tcp_urp_mp
) != NULL
) {
1377 if ((mp
= tcp
->tcp_urp_mark_mp
) != NULL
) {
1381 if (tcp
->tcp_fused_sigurg_mp
!= NULL
) {
1382 ASSERT(!IPCL_IS_NONSTR(tcp
->tcp_connp
));
1383 freeb(tcp
->tcp_fused_sigurg_mp
);
1384 tcp
->tcp_fused_sigurg_mp
= NULL
;
1387 if (tcp
->tcp_ordrel_mp
!= NULL
) {
1388 ASSERT(!IPCL_IS_NONSTR(tcp
->tcp_connp
));
1389 freeb(tcp
->tcp_ordrel_mp
);
1390 tcp
->tcp_ordrel_mp
= NULL
;
1393 TCP_NOTSACK_REMOVE_ALL(tcp
->tcp_notsack_list
, tcp
);
1394 bzero(&tcp
->tcp_sack_info
, sizeof (tcp_sack_info_t
));
1396 if (tcp
->tcp_hopopts
!= NULL
) {
1397 mi_free(tcp
->tcp_hopopts
);
1398 tcp
->tcp_hopopts
= NULL
;
1399 tcp
->tcp_hopoptslen
= 0;
1401 ASSERT(tcp
->tcp_hopoptslen
== 0);
1402 if (tcp
->tcp_dstopts
!= NULL
) {
1403 mi_free(tcp
->tcp_dstopts
);
1404 tcp
->tcp_dstopts
= NULL
;
1405 tcp
->tcp_dstoptslen
= 0;
1407 ASSERT(tcp
->tcp_dstoptslen
== 0);
1408 if (tcp
->tcp_rthdrdstopts
!= NULL
) {
1409 mi_free(tcp
->tcp_rthdrdstopts
);
1410 tcp
->tcp_rthdrdstopts
= NULL
;
1411 tcp
->tcp_rthdrdstoptslen
= 0;
1413 ASSERT(tcp
->tcp_rthdrdstoptslen
== 0);
1414 if (tcp
->tcp_rthdr
!= NULL
) {
1415 mi_free(tcp
->tcp_rthdr
);
1416 tcp
->tcp_rthdr
= NULL
;
1417 tcp
->tcp_rthdrlen
= 0;
1419 ASSERT(tcp
->tcp_rthdrlen
== 0);
1422 * Following is really a blowing away a union.
1423 * It happens to have exactly two members of identical size
1424 * the following code is enough.
1426 tcp_close_mpp(&tcp
->tcp_conn
.tcp_eager_conn_ind
);
1429 * If this is a non-STREAM socket still holding on to an upper
1430 * handle, release it. As a result of fallback we might also see
1431 * STREAMS based conns with upper handles, in which case there is
1432 * nothing to do other than clearing the field.
1434 if (connp
->conn_upper_handle
!= NULL
) {
1435 if (IPCL_IS_NONSTR(connp
)) {
1436 (*connp
->conn_upcalls
->su_closed
)(
1437 connp
->conn_upper_handle
);
1438 tcp
->tcp_detached
= B_TRUE
;
1440 connp
->conn_upper_handle
= NULL
;
1441 connp
->conn_upcalls
= NULL
;
1446 * tcp_get_conn/tcp_free_conn
1448 * tcp_get_conn is used to get a clean tcp connection structure.
1449 * It tries to reuse the connections put on the freelist by the
1450 * time_wait_collector failing which it goes to kmem_cache. This
1451 * way has two benefits compared to just allocating from and
1452 * freeing to kmem_cache.
1453 * 1) The time_wait_collector can free (which includes the cleanup)
1454 * outside the squeue. So when the interrupt comes, we have a clean
1455 * connection sitting in the freelist. Obviously, this buys us
1458 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
1459 * has multiple disadvantages - tying up the squeue during alloc.
1460 * But allocating the conn/tcp in IP land is also not the best since
1461 * we can't check the 'q' and 'q0' which are protected by squeue and
1462 * blindly allocate memory which might have to be freed here if we are
1463 * not allowed to accept the connection. By using the freelist and
1464 * putting the conn/tcp back in freelist, we don't pay a penalty for
1465 * allocating memory without checking 'q/q0' and freeing it if we can't
1466 * accept the connection.
1468 * Care should be taken to put the conn back in the same squeue's freelist
1469 * from which it was allocated. Best results are obtained if conn is
1470 * allocated from listener's squeue and freed to the same. Time wait
1471 * collector will free up the freelist is the connection ends up sitting
1472 * there for too long.
1475 tcp_get_conn(void *arg
, tcp_stack_t
*tcps
)
1478 conn_t
*connp
= NULL
;
1479 squeue_t
*sqp
= (squeue_t
*)arg
;
1480 tcp_squeue_priv_t
*tcp_time_wait
;
1482 mblk_t
*tcp_rsrv_mp
= NULL
;
1485 *((tcp_squeue_priv_t
**)squeue_getprivate(sqp
, SQPRIVATE_TCP
));
1487 mutex_enter(&tcp_time_wait
->tcp_time_wait_lock
);
1488 tcp
= tcp_time_wait
->tcp_free_list
;
1489 ASSERT((tcp
!= NULL
) ^ (tcp_time_wait
->tcp_free_list_cnt
== 0));
1491 tcp_time_wait
->tcp_free_list
= tcp
->tcp_time_wait_next
;
1492 tcp_time_wait
->tcp_free_list_cnt
--;
1493 mutex_exit(&tcp_time_wait
->tcp_time_wait_lock
);
1494 tcp
->tcp_time_wait_next
= NULL
;
1495 connp
= tcp
->tcp_connp
;
1496 connp
->conn_flags
|= IPCL_REUSED
;
1498 ASSERT(tcp
->tcp_tcps
== NULL
);
1499 ASSERT(connp
->conn_netstack
== NULL
);
1500 ASSERT(tcp
->tcp_rsrv_mp
!= NULL
);
1501 ns
= tcps
->tcps_netstack
;
1503 connp
->conn_netstack
= ns
;
1504 connp
->conn_ixa
->ixa_ipst
= ns
->netstack_ip
;
1505 tcp
->tcp_tcps
= tcps
;
1506 ipcl_globalhash_insert(connp
);
1508 connp
->conn_ixa
->ixa_notify_cookie
= tcp
;
1509 ASSERT(connp
->conn_ixa
->ixa_notify
== tcp_notify
);
1510 connp
->conn_recv
= tcp_input_data
;
1511 ASSERT(connp
->conn_recvicmp
== tcp_icmp_input
);
1512 ASSERT(connp
->conn_verifyicmp
== tcp_verifyicmp
);
1513 return ((void *)connp
);
1515 mutex_exit(&tcp_time_wait
->tcp_time_wait_lock
);
1517 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
1518 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
1520 tcp_rsrv_mp
= allocb(0, BPRI_HI
);
1521 if (tcp_rsrv_mp
== NULL
)
1524 if ((connp
= ipcl_conn_create(IPCL_TCPCONN
, KM_NOSLEEP
,
1525 tcps
->tcps_netstack
)) == NULL
) {
1530 tcp
= connp
->conn_tcp
;
1531 tcp
->tcp_rsrv_mp
= tcp_rsrv_mp
;
1532 mutex_init(&tcp
->tcp_rsrv_mp_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1534 tcp
->tcp_tcps
= tcps
;
1536 connp
->conn_recv
= tcp_input_data
;
1537 connp
->conn_recvicmp
= tcp_icmp_input
;
1538 connp
->conn_verifyicmp
= tcp_verifyicmp
;
1541 * Register tcp_notify to listen to capability changes detected by IP.
1542 * This upcall is made in the context of the call to conn_ip_output
1543 * thus it is inside the squeue.
1545 connp
->conn_ixa
->ixa_notify
= tcp_notify
;
1546 connp
->conn_ixa
->ixa_notify_cookie
= tcp
;
1548 return ((void *)connp
);
1552 * Handle connect to IPv4 destinations, including connections for AF_INET6
1553 * sockets connecting to IPv4 mapped IPv6 destinations.
1554 * Returns zero if OK, a positive errno, or a negative TLI error.
1557 tcp_connect_ipv4(tcp_t
*tcp
, ipaddr_t
*dstaddrp
, in_port_t dstport
,
1560 ipaddr_t dstaddr
= *dstaddrp
;
1562 conn_t
*connp
= tcp
->tcp_connp
;
1563 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
1566 ASSERT(connp
->conn_ipversion
== IPV4_VERSION
);
1568 /* Check for attempt to connect to INADDR_ANY */
1569 if (dstaddr
== INADDR_ANY
) {
1571 * SunOS 4.x and 4.3 BSD allow an application
1572 * to connect a TCP socket to INADDR_ANY.
1573 * When they do this, the kernel picks the
1574 * address of one interface and uses it
1575 * instead. The kernel usually ends up
1576 * picking the address of the loopback
1577 * interface. This is an undocumented feature.
1578 * However, we provide the same thing here
1579 * in order to have source and binary
1580 * compatibility with SunOS 4.x.
1581 * Update the T_CONN_REQ (sin/sin6) since it is used to
1582 * generate the T_CONN_CON.
1584 dstaddr
= htonl(INADDR_LOOPBACK
);
1585 *dstaddrp
= dstaddr
;
1588 /* Handle __sin6_src_id if socket not bound to an IP address */
1589 if (srcid
!= 0 && connp
->conn_laddr_v4
== INADDR_ANY
) {
1590 ip_srcid_find_id(srcid
, &connp
->conn_laddr_v6
,
1591 IPCL_ZONEID(connp
), tcps
->tcps_netstack
);
1592 connp
->conn_saddr_v6
= connp
->conn_laddr_v6
;
1595 IN6_IPADDR_TO_V4MAPPED(dstaddr
, &connp
->conn_faddr_v6
);
1596 connp
->conn_fport
= dstport
;
1599 * At this point the remote destination address and remote port fields
1600 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1601 * have to see which state tcp was in so we can take appropriate action.
1603 if (tcp
->tcp_state
== TCPS_IDLE
) {
1605 * We support a quick connect capability here, allowing
1606 * clients to transition directly from IDLE to SYN_SENT
1607 * tcp_bindi will pick an unused port, insert the connection
1608 * in the bind hash and transition to BOUND state.
1610 lport
= tcp_update_next_port(tcps
->tcps_next_port_to_try
,
1612 lport
= tcp_bindi(tcp
, lport
, &connp
->conn_laddr_v6
, 0, B_TRUE
,
1619 * Lookup the route to determine a source address and the uinfo.
1620 * Setup TCP parameters based on the metrics/DCE.
1622 error
= tcp_set_destination(tcp
);
1627 * Don't let an endpoint connect to itself.
1629 if (connp
->conn_faddr_v4
== connp
->conn_laddr_v4
&&
1630 connp
->conn_fport
== connp
->conn_lport
)
1633 tcp
->tcp_state
= TCPS_SYN_SENT
;
1635 return (ipcl_conn_insert_v4(connp
));
1639 * Handle connect to IPv6 destinations.
1640 * Returns zero if OK, a positive errno, or a negative TLI error.
1643 tcp_connect_ipv6(tcp_t
*tcp
, in6_addr_t
*dstaddrp
, in_port_t dstport
,
1644 uint32_t flowinfo
, uint_t srcid
, uint32_t scope_id
)
1647 conn_t
*connp
= tcp
->tcp_connp
;
1648 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
1651 ASSERT(connp
->conn_family
== AF_INET6
);
1654 * If we're here, it means that the destination address is a native
1655 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
1656 * reason why it might not be IPv6 is if the socket was bound to an
1657 * IPv4-mapped IPv6 address.
1659 if (connp
->conn_ipversion
!= IPV6_VERSION
)
1663 * Interpret a zero destination to mean loopback.
1664 * Update the T_CONN_REQ (sin/sin6) since it is used to
1665 * generate the T_CONN_CON.
1667 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp
))
1668 *dstaddrp
= ipv6_loopback
;
1670 /* Handle __sin6_src_id if socket not bound to an IP address */
1671 if (srcid
!= 0 && IN6_IS_ADDR_UNSPECIFIED(&connp
->conn_laddr_v6
)) {
1672 ip_srcid_find_id(srcid
, &connp
->conn_laddr_v6
,
1673 IPCL_ZONEID(connp
), tcps
->tcps_netstack
);
1674 connp
->conn_saddr_v6
= connp
->conn_laddr_v6
;
1678 * Take care of the scope_id now.
1680 if (scope_id
!= 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp
)) {
1681 connp
->conn_ixa
->ixa_flags
|= IXAF_SCOPEID_SET
;
1682 connp
->conn_ixa
->ixa_scopeid
= scope_id
;
1684 connp
->conn_ixa
->ixa_flags
&= ~IXAF_SCOPEID_SET
;
1687 connp
->conn_flowinfo
= flowinfo
;
1688 connp
->conn_faddr_v6
= *dstaddrp
;
1689 connp
->conn_fport
= dstport
;
1692 * At this point the remote destination address and remote port fields
1693 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1694 * have to see which state tcp was in so we can take appropriate action.
1696 if (tcp
->tcp_state
== TCPS_IDLE
) {
1698 * We support a quick connect capability here, allowing
1699 * clients to transition directly from IDLE to SYN_SENT
1700 * tcp_bindi will pick an unused port, insert the connection
1701 * in the bind hash and transition to BOUND state.
1703 lport
= tcp_update_next_port(tcps
->tcps_next_port_to_try
,
1705 lport
= tcp_bindi(tcp
, lport
, &connp
->conn_laddr_v6
, 0, B_TRUE
,
1712 * Lookup the route to determine a source address and the uinfo.
1713 * Setup TCP parameters based on the metrics/DCE.
1715 error
= tcp_set_destination(tcp
);
1720 * Don't let an endpoint connect to itself.
1722 if (IN6_ARE_ADDR_EQUAL(&connp
->conn_faddr_v6
, &connp
->conn_laddr_v6
) &&
1723 connp
->conn_fport
== connp
->conn_lport
)
1726 tcp
->tcp_state
= TCPS_SYN_SENT
;
1728 return (ipcl_conn_insert_v6(connp
));
1733 * Note that unlike other functions this returns a positive tli error
1734 * when it fails; it never returns an errno.
1737 tcp_disconnect_common(tcp_t
*tcp
, t_scalar_t seqnum
)
1740 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
1741 conn_t
*connp
= tcp
->tcp_connp
;
1744 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
1745 * when the stream is in BOUND state. Do not send a reset,
1746 * since the destination IP address is not valid, and it can
1747 * be the initialized value of all zeros (broadcast address).
1749 if (tcp
->tcp_state
<= TCPS_BOUND
) {
1750 if (connp
->conn_debug
) {
1751 (void) strlog(TCP_MOD_ID
, 0, 1, SL_ERROR
|SL_TRACE
,
1752 "tcp_disconnect: bad state, %d", tcp
->tcp_state
);
1755 } else if (tcp
->tcp_state
>= TCPS_ESTABLISHED
) {
1756 TCPS_CONN_DEC(tcps
);
1759 if (seqnum
== -1 || tcp
->tcp_conn_req_max
== 0) {
1762 * According to TPI, for non-listeners, ignore seqnum
1764 * Following interpretation of -1 seqnum is historical
1765 * and implied TPI ? (TPI only states that for T_CONN_IND,
1766 * a valid seqnum should not be -1).
1768 * -1 means disconnect everything
1769 * regardless even on a listener.
1772 int old_state
= tcp
->tcp_state
;
1773 ip_stack_t
*ipst
= tcps
->tcps_netstack
->netstack_ip
;
1776 * The connection can't be on the tcp_time_wait_head list
1777 * since it is not detached.
1779 ASSERT(tcp
->tcp_time_wait_next
== NULL
);
1780 ASSERT(tcp
->tcp_time_wait_prev
== NULL
);
1781 ASSERT(tcp
->tcp_time_wait_expire
== 0);
1783 * If it used to be a listener, check to make sure no one else
1784 * has taken the port before switching back to LISTEN state.
1786 if (connp
->conn_ipversion
== IPV4_VERSION
) {
1787 lconnp
= ipcl_lookup_listener_v4(connp
->conn_lport
,
1788 connp
->conn_laddr_v4
, IPCL_ZONEID(connp
), ipst
);
1792 if (connp
->conn_ixa
->ixa_flags
& IXAF_SCOPEID_SET
)
1793 ifindex
= connp
->conn_ixa
->ixa_scopeid
;
1795 /* Allow conn_bound_if listeners? */
1796 lconnp
= ipcl_lookup_listener_v6(connp
->conn_lport
,
1797 &connp
->conn_laddr_v6
, ifindex
, IPCL_ZONEID(connp
),
1800 if (tcp
->tcp_conn_req_max
&& lconnp
== NULL
) {
1801 tcp
->tcp_state
= TCPS_LISTEN
;
1802 DTRACE_TCP6(state__change
, void, NULL
, ip_xmit_attr_t
*,
1803 connp
->conn_ixa
, void, NULL
, tcp_t
*, tcp
, void,
1804 NULL
, int32_t, old_state
);
1805 } else if (old_state
> TCPS_BOUND
) {
1806 tcp
->tcp_conn_req_max
= 0;
1807 tcp
->tcp_state
= TCPS_BOUND
;
1808 DTRACE_TCP6(state__change
, void, NULL
, ip_xmit_attr_t
*,
1809 connp
->conn_ixa
, void, NULL
, tcp_t
*, tcp
, void,
1810 NULL
, int32_t, old_state
);
1813 * If this end point is not going to become a listener,
1814 * decrement the listener connection count if
1815 * necessary. Note that we do not do this if it is
1816 * going to be a listner (the above if case) since
1817 * then it may remove the counter struct.
1819 if (tcp
->tcp_listen_cnt
!= NULL
)
1820 TCP_DECR_LISTEN_CNT(tcp
);
1823 CONN_DEC_REF(lconnp
);
1824 switch (old_state
) {
1827 TCPS_BUMP_MIB(tcps
, tcpAttemptFails
);
1829 case TCPS_ESTABLISHED
:
1830 case TCPS_CLOSE_WAIT
:
1831 TCPS_BUMP_MIB(tcps
, tcpEstabResets
);
1838 mutex_enter(&tcp
->tcp_eager_lock
);
1839 if ((tcp
->tcp_conn_req_cnt_q0
!= 0) ||
1840 (tcp
->tcp_conn_req_cnt_q
!= 0)) {
1841 tcp_eager_cleanup(tcp
, 0);
1843 mutex_exit(&tcp
->tcp_eager_lock
);
1845 tcp_xmit_ctl("tcp_disconnect", tcp
, tcp
->tcp_snxt
,
1846 tcp
->tcp_rnxt
, TH_RST
| TH_ACK
);
1851 } else if (!tcp_eager_blowoff(tcp
, seqnum
)) {
1858 * Our client hereby directs us to reject the connection request
1859 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
1860 * of sending the appropriate RST, not an ICMP error.
1863 tcp_disconnect(tcp_t
*tcp
, mblk_t
*mp
)
1867 conn_t
*connp
= tcp
->tcp_connp
;
1869 ASSERT((uintptr_t)(mp
->b_wptr
- mp
->b_rptr
) <= (uintptr_t)INT_MAX
);
1870 if ((mp
->b_wptr
- mp
->b_rptr
) < sizeof (struct T_discon_req
)) {
1871 tcp_err_ack(tcp
, mp
, TPROTO
, 0);
1874 seqnum
= ((struct T_discon_req
*)mp
->b_rptr
)->SEQ_number
;
1875 error
= tcp_disconnect_common(tcp
, seqnum
);
1877 tcp_err_ack(tcp
, mp
, error
, 0);
1879 if (tcp
->tcp_state
>= TCPS_ESTABLISHED
) {
1880 /* Send M_FLUSH according to TPI */
1881 (void) putnextctl1(connp
->conn_rq
, M_FLUSH
, FLUSHRW
);
1883 mp
= mi_tpi_ok_ack_alloc(mp
);
1885 putnext(connp
->conn_rq
, mp
);
/*
 * Handle reinitialization of a tcp structure.
 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
 */
static void
tcp_reinit(tcp_t *tcp)
{
	mblk_t		*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	int32_t		oldstate;

	/* tcp_reinit should never be called for detached tcp_t's */
	ASSERT(tcp->tcp_listener == NULL);
	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	/* Cancel outstanding timers */
	tcp_timers_stop(tcp);

	/*
	 * Reset everything in the state vector, after updating global
	 * MIB data from instance counters.
	 */
	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	if (tcp->tcp_snd_zcopy_aware)
		tcp_zcopy_notify(tcp);
	tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
	tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
		tcp_clrqfull(tcp);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	tcp_close_mpp(&tcp->tcp_reass_head);
	tcp->tcp_reass_tail = NULL;
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
	}
	tcp->tcp_rcv_last_tail = NULL;

	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mp = NULL;
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mark_mp = NULL;
	}
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	/*
	 * Following is a union with two members which are
	 * identical types and size so the following cleanup
	 * is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	CL_INET_DISCONNECT(connp);

	/*
	 * The connection can't be on the tcp_time_wait_head list
	 * since it is not detached.
	 */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	/*
	 * Reset/preserve other values
	 */
	tcp_reinit_values(tcp);
	ipcl_hash_remove(connp);
	/* Note that ixa_cred gets cleared in ixa_cleanup */
	ixa_cleanup(connp->conn_ixa);
	tcp_ipsec_cleanup(tcp);

	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	oldstate = tcp->tcp_state;

	if (tcp->tcp_conn_req_max != 0) {
		/*
		 * This is the case when a TLI program uses the same
		 * transport end point to accept a connection. This
		 * makes the TCP both a listener and acceptor. When
		 * this connection is closed, we need to set the state
		 * back to TCPS_LISTEN. Make sure that the eager list
		 * is reinitialized.
		 *
		 * Note that this stream is still bound to the four
		 * tuples of the previous connection in IP. If a new
		 * SYN with different foreign address comes in, IP will
		 * not find it and will send it to the global queue. In
		 * the global queue, TCP will do a tcp_lookup_listener()
		 * to find this stream. This works because this stream
		 * is only removed from connected hash.
		 */
		tcp->tcp_state = TCPS_LISTEN;
		tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
		tcp->tcp_eager_next_drop_q0 = tcp;
		tcp->tcp_eager_prev_drop_q0 = tcp;
		/*
		 * Initially set conn_recv to tcp_input_listener_unbound to try
		 * to pick a good squeue for the listener when the first SYN
		 * arrives. tcp_input_listener_unbound sets it to
		 * tcp_input_listener on that first SYN.
		 */
		connp->conn_recv = tcp_input_listener_unbound;

		connp->conn_proto = IPPROTO_TCP;
		connp->conn_faddr_v6 = ipv6_all_zeros;
		connp->conn_fport = 0;

		(void) ipcl_bind_insert(connp);
	} else {
		tcp->tcp_state = TCPS_BOUND;
	}

	/*
	 * Initialize to default values
	 */
	tcp_init_values(tcp, NULL);

	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	ASSERT(tcp->tcp_ptpbhn != NULL);
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
}
/*
 * Force values to zero that need be zero.
 * Do not touch values associated with the BOUND or LISTEN state
 * since the connection will end up in that state after the reinit.
 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
 * structure!
 */
static void
tcp_reinit_values(tcp)
	tcp_t *tcp;
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
#define	DONTCARE(x)	((x) = (x))
#define	PRESERVE(x)	((x) = (x))
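	/*
	 * Both macros expand to a self-assignment and so generate no code;
	 * they only document intent: DONTCARE marks fields whose previous
	 * value is irrelevant because they are initialized elsewhere before
	 * being used again, while PRESERVE marks fields that are deliberately
	 * carried over across the reinit.
	 */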
	PRESERVE(tcp->tcp_bind_hash_port);
	PRESERVE(tcp->tcp_bind_hash);
	PRESERVE(tcp->tcp_ptpbhn);
	PRESERVE(tcp->tcp_acceptor_hash);
	PRESERVE(tcp->tcp_ptpahn);

	/* Should be ASSERT NULL on these with new code! */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);
	PRESERVE(tcp->tcp_state);
	PRESERVE(connp->conn_rq);
	PRESERVE(connp->conn_wq);

	ASSERT(tcp->tcp_xmit_head == NULL);
	ASSERT(tcp->tcp_xmit_last == NULL);
	ASSERT(tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_xmit_tail == NULL);
	ASSERT(tcp->tcp_xmit_tail_unsent == 0);

	tcp->tcp_snxt = 0;			/* Displayed in mib */
	tcp->tcp_suna = 0;			/* Displayed in mib */

	DONTCARE(tcp->tcp_cwnd);	/* Init in tcp_process_options */

	ASSERT(tcp->tcp_ibsegs == 0);
	ASSERT(tcp->tcp_obsegs == 0);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	DONTCARE(tcp->tcp_naglim);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ipha);
	DONTCARE(tcp->tcp_ip6h);
	DONTCARE(tcp->tcp_tcpha);
	tcp->tcp_valid_bits = 0;

	DONTCARE(tcp->tcp_timer_backoff);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_last_recv_time);	/* Init in tcp_init_values */
	tcp->tcp_last_rcv_lbolt = 0;

	tcp->tcp_init_cwnd = 0;

	tcp->tcp_urp_last_valid = 0;
	tcp->tcp_hard_binding = 0;

	tcp->tcp_fin_acked = 0;
	tcp->tcp_fin_rcvd = 0;
	tcp->tcp_fin_sent = 0;
	tcp->tcp_ordrel_done = 0;

	tcp->tcp_detached = 0;

	tcp->tcp_snd_ws_ok = B_FALSE;
	tcp->tcp_snd_ts_ok = B_FALSE;
	tcp->tcp_zero_win_probe = 0;

	tcp->tcp_loopback = 0;
	tcp->tcp_localnet = 0;
	tcp->tcp_syn_defense = 0;
	tcp->tcp_set_timer = 0;

	tcp->tcp_active_open = 0;
	tcp->tcp_rexmit = B_FALSE;
	tcp->tcp_xmit_zc_clean = B_FALSE;

	tcp->tcp_snd_sack_ok = B_FALSE;
	tcp->tcp_hwcksum = B_FALSE;

	DONTCARE(tcp->tcp_maxpsz_multiplier);	/* Init in tcp_init_values */

	tcp->tcp_conn_def_q0 = 0;
	tcp->tcp_ip_forward_progress = B_FALSE;
	tcp->tcp_ecn_ok = B_FALSE;

	tcp->tcp_cwr = B_FALSE;
	tcp->tcp_ecn_echo_on = B_FALSE;
	tcp->tcp_is_wnd_shrnk = B_FALSE;

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	tcp->tcp_rcv_ws = 0;
	tcp->tcp_snd_ws = 0;
	tcp->tcp_ts_recent = 0;
	tcp->tcp_rnxt = 0;			/* Displayed in mib */
	DONTCARE(tcp->tcp_rwnd);		/* Set in tcp_reinit() */
	tcp->tcp_initial_pmtu = 0;

	ASSERT(tcp->tcp_reass_head == NULL);
	ASSERT(tcp->tcp_reass_tail == NULL);

	tcp->tcp_cwnd_cnt = 0;

	ASSERT(tcp->tcp_rcv_list == NULL);
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);

	DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
	DONTCARE(tcp->tcp_cwnd_max);		/* Init in tcp_init_values */

	tcp->tcp_rto = 0;			/* Displayed in MIB */
	DONTCARE(tcp->tcp_rtt_sa);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_rtt_sd);		/* Init in tcp_init_values */
	tcp->tcp_rtt_update = 0;

	DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
	DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */

	tcp->tcp_rack = 0;			/* Displayed in mib */
	tcp->tcp_rack_cnt = 0;
	tcp->tcp_rack_cur_max = 0;
	tcp->tcp_rack_abs_max = 0;

	tcp->tcp_max_swnd = 0;

	ASSERT(tcp->tcp_listener == NULL);

	DONTCARE(tcp->tcp_irs);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_iss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_fss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_urg);			/* tcp_valid_bits cleared */

	ASSERT(tcp->tcp_conn_req_cnt_q == 0);
	ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
	PRESERVE(tcp->tcp_conn_req_max);
	PRESERVE(tcp->tcp_conn_req_seqnum);

	DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */

	DONTCARE(tcp->tcp_urp_last);	/* tcp_urp_last_valid is cleared */
	ASSERT(tcp->tcp_urp_mp == NULL);
	ASSERT(tcp->tcp_urp_mark_mp == NULL);
	ASSERT(tcp->tcp_fused_sigurg_mp == NULL);

	ASSERT(tcp->tcp_eager_next_q == NULL);
	ASSERT(tcp->tcp_eager_last_q == NULL);
	ASSERT((tcp->tcp_eager_next_q0 == NULL &&
	    tcp->tcp_eager_prev_q0 == NULL) ||
	    tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);

	ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
	    tcp->tcp_eager_prev_drop_q0 == NULL) ||
	    tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);

	tcp->tcp_client_errno = 0;

	DONTCARE(connp->conn_sum);		/* Init in tcp_init_values */

	connp->conn_faddr_v6 = ipv6_all_zeros;	/* Displayed in MIB */

	PRESERVE(connp->conn_bound_addr_v6);
	tcp->tcp_last_sent_len = 0;
	tcp->tcp_dupack_cnt = 0;

	connp->conn_fport = 0;			/* Displayed in MIB */
	PRESERVE(connp->conn_lport);

	PRESERVE(tcp->tcp_acceptor_lockp);

	ASSERT(tcp->tcp_ordrel_mp == NULL);
	PRESERVE(tcp->tcp_acceptor_id);
	DONTCARE(tcp->tcp_ipsec_overhead);

	PRESERVE(connp->conn_family);
	/* Remove any remnants of mapped address binding */
	if (connp->conn_family == AF_INET6) {
		connp->conn_ipversion = IPV6_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	}

	connp->conn_bound_if = 0;
	connp->conn_recv_ancillary.crb_all = 0;
	tcp->tcp_recvifindex = 0;
	tcp->tcp_recvhops = 0;
	tcp->tcp_closed = 0;
	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	tcp->tcp_lso = B_FALSE;

	tcp->tcp_in_ack_unsent = 0;
	tcp->tcp_cork = B_FALSE;
	tcp->tcp_tconnind_started = B_FALSE;

	PRESERVE(tcp->tcp_squeue_bytes);

	tcp->tcp_closemp_used = B_FALSE;

	PRESERVE(tcp->tcp_rsrv_mp);
	PRESERVE(tcp->tcp_rsrv_mp_lock);

	DONTCARE(tcp->tcmp_stk[0]);

	PRESERVE(tcp->tcp_connid);

	ASSERT(tcp->tcp_listen_cnt == NULL);
	ASSERT(tcp->tcp_reass_tid == 0);

#undef	DONTCARE
#undef	PRESERVE
}
/*
 * Initialize the various fields in tcp_t.  If parent (the listener) is non
 * NULL, certain values will be inherited from it.
 */
static void
tcp_init_values(tcp_t *tcp, tcp_t *parent)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	clock_t		rto;

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (parent == NULL) {
		tcp->tcp_naglim = tcps->tcps_naglim_def;

		tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
		tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
		tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;

		tcp->tcp_first_ctimer_threshold =
		    tcps->tcps_ip_notify_cinterval;
		tcp->tcp_second_ctimer_threshold =
		    tcps->tcps_ip_abort_cinterval;
		tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
		tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;

		tcp->tcp_fin_wait_2_flush_interval =
		    tcps->tcps_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
		tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
		tcp->tcp_ka_cnt = 0;
		tcp->tcp_ka_rinterval = 0;

		/*
		 * Default value of tcp_init_cwnd is 0, so no need to set here
		 * if parent is NULL.  But we need to inherit it from parent.
		 */
	} else {
		/* Inherit various TCP parameters from the parent. */
		tcp->tcp_naglim = parent->tcp_naglim;

		tcp->tcp_rto_initial = parent->tcp_rto_initial;
		tcp->tcp_rto_min = parent->tcp_rto_min;
		tcp->tcp_rto_max = parent->tcp_rto_max;

		tcp->tcp_first_ctimer_threshold =
		    parent->tcp_first_ctimer_threshold;
		tcp->tcp_second_ctimer_threshold =
		    parent->tcp_second_ctimer_threshold;
		tcp->tcp_first_timer_threshold =
		    parent->tcp_first_timer_threshold;
		tcp->tcp_second_timer_threshold =
		    parent->tcp_second_timer_threshold;

		tcp->tcp_fin_wait_2_flush_interval =
		    parent->tcp_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = parent->tcp_ka_interval;
		tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;

		tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
	}

	/*
	 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
	 * will be close to tcp_rexmit_interval_initial.  By doing this, we
	 * allow the algorithm to adjust slowly to large fluctuations of RTT
	 * during first few transmissions of a connection as seen in slow
	 * links.
	 */
	tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
	tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
	rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
	    tcps->tcps_conn_grace_period;
	TCP_SET_RTO(tcp, rto);
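	/*
	 * With the seed values above, (tcp_rtt_sa >> 3) + tcp_rtt_sd works
	 * out to tcp_rto_initial, so the very first RTO is roughly
	 * tcp_rto_initial (plus the >> 5 term, the configured extra interval
	 * and the connection grace period) until real RTT samples arrive.
	 */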
	tcp->tcp_timer_backoff = 0;
	tcp->tcp_ms_we_have_waited = 0;
	tcp->tcp_last_recv_time = ddi_get_lbolt();
	tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
	tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	tcp->tcp_snd_burst = TCP_CWND_INFINITE;

	tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;

	/* NOTE:  ISS is now set in tcp_set_destination(). */

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	/* We rebuild the header template on the next connect/conn_request */

	connp->conn_mlp_type = mlptSingle;

	/*
	 * Init the window scale to the max so tcp_rwnd_set() won't pare
	 * down tcp_rwnd. tcp_set_destination() will set the right value later.
	 */
	tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
	tcp->tcp_rwnd = connp->conn_rcvbuf;

	tcp->tcp_cork = B_FALSE;
	/*
	 * Init the tcp_debug option if it wasn't already set.  This value
	 * determines whether TCP
	 * calls strlog() to print out debug messages.  Doing this
	 * initialization here means that this value is not inherited thru
	 * tcp_reinit().
	 */
	if (!connp->conn_debug)
		connp->conn_debug = tcps->tcps_dbg;
}
/*
 * Update the TCP connection according to change of PMTU.
 *
 * Path MTU might have changed by either increase or decrease, so need to
 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
 * or negative MSS, since tcp_mss_set() will do it.
 */
void
tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
{
	uint32_t	pmtu;
	int32_t		mss;
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	iaflags_t	ixaflags;

	if (tcp->tcp_tcps->tcps_ignore_path_mtu)
		return;

	if (tcp->tcp_state < TCPS_ESTABLISHED)
		return;

	/*
	 * Always call ip_get_pmtu() to make sure that IP has updated
	 * ixa_flags properly.
	 */
	pmtu = ip_get_pmtu(ixa);
	ixaflags = ixa->ixa_flags;

	/*
	 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
	 * IPsec overhead if applied. Make sure to use the most recent
	 * IPsec information.
	 */
	mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);

	/*
	 * Nothing to change, so just return.
	 */
	if (mss == tcp->tcp_mss)
		return;

	/*
	 * Currently, for ICMP errors, only PMTU decrease is handled.
	 */
	if (mss > tcp->tcp_mss && decrease_only)
		return;

	DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);

	/*
	 * Update ixa_fragsize and ixa_pmtu.
	 */
	ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;

	/*
	 * Adjust MSS and all relevant variables.
	 */
	tcp_mss_set(tcp, mss);

	/*
	 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
	 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
	 * has a (potentially different) min size we do the same. Make sure to
	 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
	 * fragment the packet.
	 *
	 * LSO over IPv6 can not be fragmented. So need to disable LSO
	 * when IPv6 fragmentation is needed.
	 */
	if (mss < tcp->tcp_tcps->tcps_mss_min)
		ixaflags |= IXAF_PMTU_TOO_SMALL;

	if (ixaflags & IXAF_PMTU_TOO_SMALL)
		ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);

	if ((connp->conn_ipversion == IPV4_VERSION) &&
	    !(ixaflags & IXAF_PMTU_IPV4_DF)) {
		tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
	}
	ixa->ixa_flags = ixaflags;
}
int
tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
{
	conn_t	*connp = tcp->tcp_connp;
	queue_t	*q = connp->conn_rq;
	int32_t	mss = tcp->tcp_mss;
	int	maxpsz;

	if (TCP_IS_DETACHED(tcp))
		return (mss);
	if (tcp->tcp_fused) {
		maxpsz = tcp_fuse_maxpsz(tcp);
		mss = INFPSZ;
	} else if (tcp->tcp_maxpsz_multiplier == 0) {
		/*
		 * Set the sd_qn_maxpsz according to the socket send buffer
		 * size, and sd_maxblk to INFPSZ (-1).  This will essentially
		 * instruct the stream head to copyin user data into contiguous
		 * kernel-allocated buffers without breaking it up into smaller
		 * chunks.  We round up the buffer size to the nearest SMSS.
		 */
		maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
		mss = INFPSZ;
	} else {
		/*
		 * Set sd_qn_maxpsz to approx half the (receivers) buffer
		 * (and a multiple of the mss).  This instructs the stream
		 * head to break down larger than SMSS writes into SMSS-
		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
		 */
		maxpsz = tcp->tcp_maxpsz_multiplier * mss;
		if (maxpsz > connp->conn_sndbuf / 2) {
			maxpsz = connp->conn_sndbuf / 2;
			/* Round up to nearest mss */
			maxpsz = MSS_ROUNDUP(maxpsz, mss);
		}
	}

	(void) proto_set_maxpsz(q, connp, maxpsz);
	if (!(IPCL_IS_NONSTR(connp)))
		connp->conn_wq->q_maxpsz = maxpsz;
	if (set_maxblk)
		(void) proto_set_tx_maxblk(q, connp, mss);
	return (mss);
}
/* For /dev/tcp aka AF_INET open */
int
tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
}

/* For /dev/tcp6 aka AF_INET6 open */
int
tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
}
conn_t *
tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
    int *errorp)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp;
	zoneid_t	zoneid;
	tcp_stack_t	*tcps;
	squeue_t	*sqp;

	ASSERT(errorp != NULL);
	/*
	 * Find the proper zoneid and netstack.
	 */
	/*
	 * Special case for install: miniroot needs to be able to
	 * access files via NFS as though it were always in the
	 * global zone.
	 */
	if (credp == kcred && nfs_global_client_only != 0) {
		zoneid = GLOBAL_ZONEID;
		tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
		    netstack_tcp;
		ASSERT(tcps != NULL);
	} else {
		netstack_t *ns;
		int err;

		if ((err = secpolicy_basic_net_access(credp)) != 0) {
			*errorp = err;
			return (NULL);
		}

		ns = netstack_find_by_cred(credp);
		ASSERT(ns != NULL);
		tcps = ns->netstack_tcp;
		ASSERT(tcps != NULL);

		/*
		 * For exclusive stacks we set the zoneid to zero
		 * to make TCP operate as if in the global zone.
		 */
		if (tcps->tcps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID)
			zoneid = GLOBAL_ZONEID;
		else
			zoneid = crgetzoneid(credp);
	}

	sqp = IP_SQUEUE_GET((uint_t)gethrtime());
	connp = (conn_t *)tcp_get_conn(sqp, tcps);
	/*
	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
	 * so we drop it by one.
	 */
	netstack_rele(tcps->tcps_netstack);
	if (connp == NULL) {
		*errorp = ENOSR;
		return (NULL);
	}
	ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);

	connp->conn_sqp = sqp;
	connp->conn_initial_sqp = connp->conn_sqp;
	connp->conn_ixa->ixa_sqp = connp->conn_sqp;
	tcp = connp->conn_tcp;

	/*
	 * Besides asking IP to set the checksum for us, have conn_ip_output
	 * to do the following checks when necessary:
	 *
	 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
	 * IXAF_VERIFY_PMTU: verify PMTU changes
	 * IXAF_VERIFY_LSO: verify LSO capability changes
	 */
	connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;

	if (!tcps->tcps_dev_flow_ctl)
		connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	if (isv6) {
		connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_family = AF_INET6;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
		connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		connp->conn_family = AF_INET;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
		connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
	}
	connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;

	crhold(credp);
	connp->conn_cred = credp;
	connp->conn_cpid = curproc->p_pid;
	connp->conn_open_time = ddi_get_lbolt64();

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = credp;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;

	connp->conn_zoneid = zoneid;
	/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
	connp->conn_ixa->ixa_zoneid = zoneid;
	connp->conn_mlp_type = mlptSingle;
	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
	ASSERT(tcp->tcp_tcps == tcps);

	/*
	 * If the caller has the process-wide flag set, then default to MAC
	 * exempt mode.  This allows read-down to unlabeled hosts.
	 */
	if (getpflags(NET_MAC_AWARE, credp) != 0)
		connp->conn_mac_mode = CONN_MAC_AWARE;

	connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);

	if (issocket)
		tcp->tcp_issocket = 1;

	connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
	connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
	connp->conn_sndlowat = tcps->tcps_xmit_lowat;
	connp->conn_so_type = SOCK_STREAM;
	connp->conn_wroff = connp->conn_ht_iphc_allocated +
	    tcps->tcps_wroff_xtra;

	SOCK_CONNID_INIT(tcp->tcp_connid);
	/* DTrace ignores this - it isn't a tcp:::state-change */
	tcp->tcp_state = TCPS_IDLE;
	tcp_init_values(tcp, NULL);
	return (connp);
}
static int
tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
    boolean_t isv6)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	int		err;
	dev_t		conn_dev;
	vmem_t		*minor_arena = NULL;
	boolean_t	issocket;

	if (q->q_ptr != NULL)
		return (0);

	if (sflag == MODOPEN)
		return (EINVAL);

	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
		minor_arena = ip_minor_arena_la;
	} else {
		/*
		 * Either minor numbers in the large arena were exhausted
		 * or a non socket application is doing the open.
		 * Try to allocate from the small arena.
		 */
		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
			return (EBUSY);
		}
		minor_arena = ip_minor_arena_sa;
	}

	ASSERT(minor_arena != NULL);

	*devp = makedevice(getmajor(*devp), (minor_t)conn_dev);

	if (flag & SO_FALLBACK) {
		/*
		 * Non streams socket needs a stream to fallback to
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_fallback_sock_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	} else if (flag & SO_ACCEPTOR) {
		q->q_qinfo = &tcp_acceptor_rinit;
		/*
		 * the conn_dev and minor_arena will be subsequently used by
		 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
		 * the minor device number for this connection from the q_ptr.
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_acceptor_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	}

	issocket = flag & SO_SOCKSTR;
	connp = tcp_create_common(credp, isv6, issocket, &err);

	if (connp == NULL) {
		inet_minor_free(minor_arena, conn_dev);
		q->q_ptr = WR(q)->q_ptr = NULL;
		return (err);
	}

	connp->conn_rq = q;
	connp->conn_wq = WR(q);
	q->q_ptr = WR(q)->q_ptr = connp;

	connp->conn_dev = conn_dev;
	connp->conn_minor_arena = minor_arena;

	ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
	ASSERT(WR(q)->q_qinfo == &tcp_winit);

	tcp = connp->conn_tcp;

	if (issocket) {
		WR(q)->q_qinfo = &tcp_sock_winit;
	} else {
#ifdef  _ILP32
		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
#else
		tcp->tcp_acceptor_id = conn_dev;
#endif  /* _ILP32 */
		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
	}

	/*
	 * Put the ref for TCP. Ref for IP was already put
	 * by ipcl_conn_create. Also Make the conn_t globally
	 * visible to walkers
	 */
	mutex_enter(&connp->conn_lock);
	CONN_INC_REF_LOCKED(connp);
	ASSERT(connp->conn_ref == 2);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);

	qprocson(q);
	return (0);
}
/*
 * Build/update the tcp header template (in conn_ht_iphc) based on
 * conn_xmit_ipp. The headers include ip6_t, any extension
 * headers, and the maximum size tcp header (to avoid reallocation
 * on the fly for additional tcp options).
 *
 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
 * Returns failure if can't allocate memory.
 */
int
tcp_build_hdrs(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	char		buf[TCP_MAX_HDR_LENGTH];
	uint_t		buflen;
	uint_t		ulplen = TCP_MIN_HEADER_LENGTH;
	uint_t		extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
	tcpha_t		*tcpha;
	uint32_t	cksum;
	int		error;

	/*
	 * We might be called after the connection is set up, and we might
	 * have TS options already in the TCP header. Thus we  save any
	 * existing tcp header.
	 */
	buflen = connp->conn_ht_ulp_len;
	if (buflen != 0) {
		bcopy(connp->conn_ht_ulp, buf, buflen);
		extralen -= buflen - ulplen;
		ulplen = buflen;
	}

	/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
	mutex_enter(&connp->conn_lock);
	error = conn_build_hdr_template(connp, ulplen, extralen,
	    &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	/*
	 * Any routing header/option has been massaged. The checksum difference
	 * is stored in conn_sum for later use.
	 */
	tcpha = (tcpha_t *)connp->conn_ht_ulp;
	tcp->tcp_tcpha = tcpha;

	/* restore any old tcp header */
	if (buflen != 0) {
		bcopy(buf, connp->conn_ht_ulp, buflen);
	} else {
		tcpha->tha_sum = 0;
		tcpha->tha_urp = 0;
		tcpha->tha_ack = 0;
		tcpha->tha_offset_and_reserved = (5 << 4);
		tcpha->tha_lport = connp->conn_lport;
		tcpha->tha_fport = connp->conn_fport;
	}

	/*
	 * IP wants our header length in the checksum field to
	 * allow it to perform a single pseudo-header+checksum
	 * calculation on behalf of TCP.
	 * Include the adjustment for a source route once IP_OPTIONS is set.
	 */
	cksum = sizeof (tcpha_t) + connp->conn_sum;
	cksum = (cksum >> 16) + (cksum & 0xFFFF);
	ASSERT(cksum < 0x10000);
	tcpha->tha_sum = htons(cksum);

	if (connp->conn_ipversion == IPV4_VERSION)
		tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
	else
		tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;

	if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
	    connp->conn_wroff) {
		connp->conn_wroff = connp->conn_ht_iphc_allocated +
		    tcps->tcps_wroff_xtra;
		(void) proto_set_tx_wroff(connp->conn_rq, connp,
		    connp->conn_wroff);
	}
	return (0);
}
/*
 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
 * We do not allow the receive window to shrink.  After setting rwnd,
 * set the flow control hiwat of the stream.
 *
 * This function is called in 2 cases:
 *
 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
 *    connection (passive open) and in tcp_input_data() for active connect.
 *    This is called after tcp_mss_set() when the desired MSS value is known.
 *    This makes sure that our window size is a multiple of the other side's
 *    MSS.
 * 2) Handling SO_RCVBUF option.
 *
 * It is ASSUMED that the requested size is a multiple of the current MSS.
 *
 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
 * user requests so.
 */
int
tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
{
	uint32_t	mss = tcp->tcp_mss;
	uint32_t	old_max_rwnd;
	uint32_t	max_transmittable_rwnd;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Insist on a receive window that is at least
	 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
	 * funny TCP interactions of Nagle algorithm, SWS avoidance
	 * and delayed acknowledgement.
	 */
	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);

	if (tcp->tcp_fused) {
		size_t sth_hiwat;
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		ASSERT(peer_tcp != NULL);
		sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
		if (!tcp_detached) {
			(void) proto_set_rx_hiwat(connp->conn_rq, connp,
			    sth_hiwat);
			tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
		}

		/* Caller could have changed tcp_rwnd; update tha_win */
		if (tcp->tcp_tcpha != NULL) {
			tcp->tcp_tcpha->tha_win =
			    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
		}
		if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
			tcp->tcp_cwnd_max = rwnd;

		/*
		 * In the fusion case, the maxpsz stream head value of
		 * our peer is set according to its send buffer size
		 * and our receive buffer size; since the latter may
		 * have changed we need to update the peer's maxpsz.
		 */
		(void) tcp_maxpsz_set(peer_tcp, B_TRUE);
		return (sth_hiwat);
	}

	if (tcp_detached)
		old_max_rwnd = tcp->tcp_rwnd;
	else
		old_max_rwnd = connp->conn_rcvbuf;

	/*
	 * If window size info has already been exchanged, TCP should not
	 * shrink the window.  Shrinking window is doable if done carefully.
	 * We may add that support later.  But so far there is not a real
	 * need to do that.
	 */
	if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
		/* MSS may have changed, do a round up again. */
		rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
	}

	/*
	 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
	 * can be applied even before the window scale option is decided.
	 */
	max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_transmittable_rwnd) {
		rwnd = max_transmittable_rwnd -
		    (max_transmittable_rwnd % mss);
		if (rwnd < mss)
			rwnd = max_transmittable_rwnd;
		/*
		 * If we're over the limit we may have to back down tcp_rwnd.
		 * The increment below won't work for us. So we set all three
		 * here and the increment below will have no effect.
		 */
		tcp->tcp_rwnd = old_max_rwnd = rwnd;
	}
	if (tcp->tcp_localnet) {
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
	} else {
		/*
		 * For a remote host on a different subnet (through a router),
		 * we ack every other packet to be conforming to RFC1122.
		 * tcp_deferred_acks_max is default to 2.
		 */
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
	}
	if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
		tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
	else
		tcp->tcp_rack_cur_max = 0;
	/*
	 * Increment the current rwnd by the amount the maximum grew (we
	 * can not overwrite it since we might be in the middle of a
	 * connection.)
	 */
	tcp->tcp_rwnd += rwnd - old_max_rwnd;
	connp->conn_rcvbuf = rwnd;

	/* Are we already connected? */
	if (tcp->tcp_tcpha != NULL) {
		tcp->tcp_tcpha->tha_win =
		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	}

	if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
		tcp->tcp_cwnd_max = rwnd;

	if (tcp_detached)
		return (rwnd);
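	/* The receive threshold is set to one-eighth of the new window. */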
	tcp_set_recv_threshold(tcp, rwnd >> 3);

	(void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);

	return (rwnd);
}
int
tcp_do_unbind(conn_t *connp)
{
	tcp_t	*tcp = connp->conn_tcp;
	int32_t	oldstate;

	switch (tcp->tcp_state) {
	case TCPS_BOUND:
	case TCPS_LISTEN:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * Need to clean up all the eagers since after the unbind, segments
	 * will no longer be delivered to this listener stream.
	 */
	mutex_enter(&tcp->tcp_eager_lock);
	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
		tcp_eager_cleanup(tcp, 0);
	}
	mutex_exit(&tcp->tcp_eager_lock);

	/* Clean up the listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);
	connp->conn_laddr_v6 = ipv6_all_zeros;
	connp->conn_saddr_v6 = ipv6_all_zeros;
	tcp_bind_hash_remove(tcp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_IDLE;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	bzero(&connp->conn_ports, sizeof (connp->conn_ports));
	return (0);
}
/*
 * Collect protocol properties to send to the upper handle.
 */
void
tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
{
	conn_t *connp = tcp->tcp_connp;

	sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
	sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);

	sopp->sopp_rxhiwat = tcp->tcp_fused ?
	    tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
	    connp->conn_rcvbuf;
	/*
	 * Determine what write offset value to use depending on SACK and
	 * whether the endpoint is fused or not.
	 */
	if (tcp->tcp_fused) {
		ASSERT(tcp->tcp_loopback);
		ASSERT(tcp->tcp_loopback_peer != NULL);
		/*
		 * For fused tcp loopback, set the stream head's write
		 * offset value to zero since we won't be needing any room
		 * for TCP/IP headers.  This would also improve performance
		 * since it would reduce the amount of work done by kmem.
		 * Non-fused tcp loopback case is handled separately below.
		 */
		sopp->sopp_wroff = 0;
		/*
		 * Update the peer's transmit parameters according to
		 * our recently calculated high water mark value.
		 */
		(void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
	} else if (tcp->tcp_snd_sack_ok) {
		sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	} else {
		sopp->sopp_wroff = connp->conn_ht_iphc_len +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	}

	if (tcp->tcp_loopback) {
		sopp->sopp_flags |= SOCKOPT_LOOPBACK;
		sopp->sopp_loopback = B_TRUE;
	}
}
/*
 * Check the usability of ZEROCOPY. It's instead checking the flag set by IP.
 */
boolean_t
tcp_zcopy_check(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	boolean_t	zc_enabled = B_FALSE;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (do_tcpzcopy == 2)
		zc_enabled = B_TRUE;
	else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
		zc_enabled = B_TRUE;

	tcp->tcp_snd_zcopy_on = zc_enabled;
	if (!TCP_IS_DETACHED(tcp)) {
		if (zc_enabled) {
			ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		} else {
			ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	}
	return (zc_enabled);
}
/*
 * Backoff from a zero-copy message by copying data to a new allocated
 * message and freeing the original desballoca'ed segmapped message.
 *
 * This function is called by following two callers:
 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
 *    the original desballoca'ed message and notify sockfs. This is in re-
 *    transmission state.
 * 2. tcp_output: fix_xmitlist is set to B_FALSE. Flag STRUIO_ZCNOTIFY need
 *    to be copied to new message.
 */
mblk_t *
tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
{
	mblk_t		*nbp;
	mblk_t		*head = NULL;
	mblk_t		*tail = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(bp != NULL);
	while (bp != NULL) {
		if (IS_VMLOANED_MBLK(bp)) {
			TCP_STAT(tcps, tcp_zcopy_backoff);
			if ((nbp = copyb(bp)) == NULL) {
				tcp->tcp_xmit_zc_clean = B_FALSE;
				if (tail != NULL)
					tail->b_cont = bp;
				return ((head == NULL) ? bp : head);
			}

			if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
				if (fix_xmitlist)
					tcp_zcopy_notify(tcp);
				else
					nbp->b_datap->db_struioflag |=
					    STRUIO_ZCNOTIFY;
			}
			nbp->b_cont = bp->b_cont;

			/*
			 * Copy saved information and adjust tcp_xmit_tail
			 * if needed.
			 */
			if (fix_xmitlist) {
				nbp->b_prev = bp->b_prev;
				nbp->b_next = bp->b_next;

				if (tcp->tcp_xmit_tail == bp)
					tcp->tcp_xmit_tail = nbp;
			}

			/* Free the original message. */
			bp->b_prev = NULL;
			bp->b_next = NULL;
			freeb(bp);

			bp = nbp;
		}

		if (head == NULL) {
			head = bp;
		}
		if (tail == NULL) {
			tail = bp;
		} else {
			tail->b_cont = bp;
			tail = bp;
		}

		/* Move forward. */
		bp = bp->b_cont;
	}

	if (fix_xmitlist) {
		tcp->tcp_xmit_last = tail;
		tcp->tcp_xmit_zc_clean = B_TRUE;
	}

	return (head);
}
void
tcp_zcopy_notify(tcp_t *tcp)
{
	struct stdata	*stp;
	conn_t		*connp;

	if (tcp->tcp_detached)
		return;
	connp = tcp->tcp_connp;
	if (IPCL_IS_NONSTR(connp)) {
		(*connp->conn_upcalls->su_zcopy_notify)
		    (connp->conn_upper_handle);
		return;
	}
	stp = STREAM(connp->conn_rq);
	mutex_enter(&stp->sd_lock);
	stp->sd_flag |= STZCNOTIFY;
	cv_broadcast(&stp->sd_zcopy_wait);
	mutex_exit(&stp->sd_lock);
}
/*
 * Update the TCP connection according to change of LSO capability.
 */
static void
tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
{
	/*
	 * We check against IPv4 header length to preserve the old behavior
	 * of only enabling LSO when there are no IP options.
	 * But this restriction might not be necessary at all. Before removing
	 * it, need to verify how LSO is handled for source routing case, with
	 * which IP does software checksum.
	 *
	 * For IPv6, whenever any extension header is needed, LSO is suppressed.
	 */
	if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
		return;

	/*
	 * Either the LSO capability newly became usable, or it has changed.
	 */
	if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
		ill_lso_capab_t	*lsoc = &ixa->ixa_lso_capab;

		ASSERT(lsoc->ill_lso_max > 0);
		tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);

		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be enabled, notify the STREAM header with larger
		 * data block.
		 */
		if (!tcp->tcp_lso)
			tcp->tcp_maxpsz_multiplier = 0;

		tcp->tcp_lso = B_TRUE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
	} else { /* LSO capability is not usable any more. */
		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be disabled, notify the STREAM header with smaller
		 * data block. And need to restore fragsize to PMTU.
		 */
		if (tcp->tcp_lso) {
			tcp->tcp_maxpsz_multiplier =
			    tcp->tcp_tcps->tcps_maxpsz_multiplier;
			ixa->ixa_fragsize = ixa->ixa_pmtu;
			tcp->tcp_lso = B_FALSE;
			TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
		}
	}

	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
/*
 * Update the TCP connection according to change of ZEROCOPY capability.
 */
static void
tcp_update_zcopy(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_snd_zcopy_on) {
		tcp->tcp_snd_zcopy_on = B_FALSE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	} else {
		tcp->tcp_snd_zcopy_on = B_TRUE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		}
	}
}
/*
 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
 * so it's safe to update the TCP connection.
 */
/* ARGSUSED1 */
static void
tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
    ixa_notify_arg_t narg)
{
	tcp_t		*tcp = (tcp_t *)arg;
	conn_t		*connp = tcp->tcp_connp;

	switch (ntype) {
	case IXAN_LSO:
		tcp_update_lso(tcp, connp->conn_ixa);
		break;
	case IXAN_PMTU:
		tcp_update_pmtu(tcp, B_FALSE);
		break;
	case IXAN_ZCOPY:
		tcp_update_zcopy(tcp);
		break;
	default:
		break;
	}
}

/*
 * The TCP write service routine should never be called...
 */
/* ARGSUSED */
static void
tcp_wsrv(queue_t *q)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	TCP_STAT(tcps, tcp_wsrv_called);
}
/*
 * Hash list lookup routine for tcp_t structures.
 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
 */
static tcp_t *
tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
{
	tf_t	*tf;
	tcp_t	*tcp;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
	mutex_enter(&tf->tf_lock);
	for (tcp = tf->tf_tcp; tcp != NULL;
	    tcp = tcp->tcp_acceptor_hash) {
		if (tcp->tcp_acceptor_id == id) {
			CONN_INC_REF(tcp->tcp_connp);
			mutex_exit(&tf->tf_lock);
			return (tcp);
		}
	}
	mutex_exit(&tf->tf_lock);
	return (NULL);
}
/*
 * Hash list insertion routine for tcp_t structures.
 */
void
tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
{
	tf_t	*tf;
	tcp_t	**tcpp;
	tcp_t	*tcpnext;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];

	if (tcp->tcp_ptpahn != NULL)
		tcp_acceptor_hash_remove(tcp);
	tcpp = &tf->tf_tcp;
	mutex_enter(&tf->tf_lock);
	tcpnext = tcpp[0];
	if (tcpnext)
		tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
	tcp->tcp_acceptor_hash = tcpnext;
	tcp->tcp_ptpahn = tcpp;
	tcpp[0] = tcp;
	tcp->tcp_acceptor_lockp = &tf->tf_lock;	/* For tcp_*_hash_remove */
	mutex_exit(&tf->tf_lock);
}
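/*
 * Note that tcp_ptpahn ("pointer to previous acceptor hash next") points at
 * whichever pointer currently references this tcp_t (the fanout head or the
 * previous entry's tcp_acceptor_hash).  That is what allows
 * tcp_acceptor_hash_remove() below to unlink an entry without walking the
 * chain.
 */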
/*
 * Hash list removal routine for tcp_t structures.
 */
void
tcp_acceptor_hash_remove(tcp_t *tcp)
{
	tcp_t	*tcpnext;
	kmutex_t *lockp;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
	 */
	lockp = tcp->tcp_acceptor_lockp;

	if (tcp->tcp_ptpahn == NULL)
		return;

	ASSERT(lockp != NULL);
	mutex_enter(lockp);
	if (tcp->tcp_ptpahn) {
		tcpnext = tcp->tcp_acceptor_hash;
		if (tcpnext) {
			tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
			tcp->tcp_acceptor_hash = NULL;
		}
		*tcp->tcp_ptpahn = tcpnext;
		tcp->tcp_ptpahn = NULL;
	}
	mutex_exit(lockp);
	tcp->tcp_acceptor_lockp = NULL;
}
/*
 * Type three generator adapted from the random() function in 4.4 BSD:
 */

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Type 3 -- x**31 + x**3 + 1 */
#define	DEG_3		31
#define	SEP_3		3


/* Protected by tcp_random_lock */
static int tcp_randtbl[DEG_3 + 1];

static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
static int *tcp_random_rptr = &tcp_randtbl[1];

static int *tcp_random_state = &tcp_randtbl[1];
static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];

kmutex_t tcp_random_lock;
void
tcp_random_init(void)
{
	int i;
	hrtime_t hrt;
	time_t wallclock;
	uint64_t result;

	/*
	 * Use high-res timer and current time for seed.  Gethrtime() returns
	 * a longlong, which may contain resolution down to nanoseconds.
	 * The current time will either be a 32-bit or a 64-bit quantity.
	 * XOR the two together in a 64-bit result variable.
	 * Convert the result to a 32-bit value by multiplying the high-order
	 * 32-bits by the low-order 32-bits.
	 */
	hrt = gethrtime();
	(void) drv_getparm(TIME, &wallclock);
	result = (uint64_t)wallclock ^ (uint64_t)hrt;
	mutex_enter(&tcp_random_lock);
	tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
	    (result & 0xffffffff);

	for (i = 1; i < DEG_3; i++)
		tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
		    + 12345;
	tcp_random_fptr = &tcp_random_state[SEP_3];
	tcp_random_rptr = &tcp_random_state[0];
	mutex_exit(&tcp_random_lock);
	for (i = 0; i < 10 * DEG_3; i++)
		(void) tcp_random();
}
/*
 * tcp_random: Return a random number in the range [1 - (128K + 1)].
 * This range is selected to be approximately centered on TCP_ISS / 2,
 * and easy to compute. We get this value by generating a 32-bit random
 * number, selecting out the high-order 17 bits, and then adding one so
 * that we never return zero.
 */
static int
tcp_random(void)
{
	int i;

	mutex_enter(&tcp_random_lock);
	*tcp_random_fptr += *tcp_random_rptr;

	/*
	 * The high-order bits are more random than the low-order bits,
	 * so we select out the high-order 17 bits and add one so that
	 * we never return zero.
	 */
	i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
	if (++tcp_random_fptr >= tcp_random_end_ptr) {
		tcp_random_fptr = tcp_random_state;
		tcp_random_rptr++;
	} else if (++tcp_random_rptr >= tcp_random_end_ptr)
		tcp_random_rptr = tcp_random_state;

	mutex_exit(&tcp_random_lock);
	return (i);
}
/*
 * Split this function out so that if the secret changes, I'm okay.
 *
 * Initialize the tcp_iss_cookie and tcp_iss_key.
 */

#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */

static void
tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
{
	struct {
		int32_t current_time;
		uint32_t randnum;
		uint16_t pad;
		uint8_t ether[6];
		uint8_t passwd[PASSWD_SIZE];
	} tcp_iss_cookie;
	time_t t;

	/*
	 * Start with the current absolute time.
	 */
	(void) drv_getparm(TIME, &t);
	tcp_iss_cookie.current_time = t;

	/*
	 * XXX - Need a more random number per RFC 1750, not this crap.
	 * OTOH, if what follows is pretty random, then I'm in better shape.
	 */
	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
	tcp_iss_cookie.pad = 0x365c;  /* Picked from HMAC pad values. */

	/*
	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
	 * as a good template.
	 */
	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));

	/*
	 * The pass-phrase.  Normally this is supplied by user-called NDD.
	 */
	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));

	/*
	 * See 4010593 if this section becomes a problem again,
	 * but the local ethernet address is useful here.
	 */
	(void) localetheraddr(NULL,
	    (struct ether_addr *)&tcp_iss_cookie.ether);

	/*
	 * Hash 'em all together.  The MD5Final is called per-connection.
	 */
	mutex_enter(&tcps->tcps_iss_key_lock);
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
	    sizeof (tcp_iss_cookie));
	mutex_exit(&tcps->tcps_iss_key_lock);
}
/*
 * Called by IP when IP is loaded into the kernel
 */
void
tcp_ddi_g_init(void)
{
	tcp_timercache = kmem_cache_create("tcp_timercache",
	    sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
	    sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Initialize the random number generator */
	tcp_random_init();

	/* A single callback independently of how many netstacks we have */
	ip_squeue_init(tcp_squeue_add);

	tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);

	tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of tcp_stack_t's.
	 */
	netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
}
#define	INET_NAME	"ip"

/*
 * Initialize the TCP stack instance.
 */
static void *
tcp_stack_init(netstackid_t stackid, netstack_t *ns)
{
	tcp_stack_t	*tcps;
	int		i;
	int		error = 0;
	major_t		major;
	size_t		arrsz;

	tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
	tcps->tcps_netstack = ns;

	/* Initialize locks */
	mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);

	tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
	tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
	tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
	tcps->tcps_min_anonpriv_port = 512;

	tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_BIND_FANOUT_SIZE, KM_SLEEP);
	tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	/* TCP's IPsec code calls the packet dropper. */
	ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");

	arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
	tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
	    KM_SLEEP);
	bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);

	/*
	 * Note: To really walk the device tree you need the devinfo
	 * pointer to your device which is only available after probe/attach.
	 * The following is safe only because it uses ddi_root_node()
	 */
	tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
	    tcp_opt_obj.odb_opt_arr_cnt);

	/*
	 * Initialize RFC 1948 secret values.  This will probably be reset once
	 * by the boot scripts.
	 *
	 * Use NULL name, as the name is caught by the new lockstats.
	 *
	 * Initialize with some random, non-guessable string, like the global
	 * T_INFO_ACK.
	 */
	tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
	    sizeof (tcp_g_t_info_ack), tcps);

	tcps->tcps_kstat = tcp_kstat2_init(stackid);
	tcps->tcps_mibkp = tcp_kstat_init(stackid);

	major = mod_name_to_major(INET_NAME);
	error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
	ASSERT(error == 0);
	tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
	ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
	cv_init(&tcps->tcps_ixa_cleanup_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	tcps->tcps_reclaim = B_FALSE;
	tcps->tcps_reclaim_tid = 0;
	tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;

	/*
	 * ncpus is the current number of CPUs, which can be bigger than
	 * boot_ncpus.  But we don't want to use ncpus to allocate all the
	 * tcp_stats_cpu_t at system boot up time since it will be 1.  While
	 * we handle adding CPU in tcp_cpu_update(), it will be slow if
	 * there are many CPUs as we will be adding them 1 by 1.
	 *
	 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
	 * are not freed until the stack is going away.  So there is no need
	 * to grab a lock to access the per CPU tcps_sc[x] pointer.
	 */
	mutex_enter(&cpu_lock);
	tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
	mutex_exit(&cpu_lock);
	tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
	    KM_SLEEP);
	for (i = 0; i < tcps->tcps_sc_cnt; i++) {
		tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
		    KM_SLEEP);
	}

	mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
	    offsetof(tcp_listener_t, tl_link));

	return (tcps);
}
/*
 * Called when the IP module is about to be unloaded.
 */
void
tcp_ddi_g_destroy(void)
{
	tcp_g_kstat_fini(tcp_g_kstat);
	tcp_g_kstat = NULL;
	bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));

	mutex_destroy(&tcp_random_lock);

	kmem_cache_destroy(tcp_timercache);
	kmem_cache_destroy(tcp_notsack_blk_cache);

	netstack_unregister(NS_TCP);
}
/*
 * Free the TCP stack instance.
 */
static void
tcp_stack_fini(netstackid_t stackid, void *arg)
{
	tcp_stack_t *tcps = (tcp_stack_t *)arg;
	int i;

	freeb(tcps->tcps_ixa_cleanup_mp);
	tcps->tcps_ixa_cleanup_mp = NULL;
	cv_destroy(&tcps->tcps_ixa_cleanup_cv);
	mutex_destroy(&tcps->tcps_ixa_cleanup_lock);

	/*
	 * Set tcps_reclaim to false tells tcp_reclaim_timer() not to restart
	 * the timer.
	 */
	mutex_enter(&tcps->tcps_reclaim_lock);
	tcps->tcps_reclaim = B_FALSE;
	mutex_exit(&tcps->tcps_reclaim_lock);
	if (tcps->tcps_reclaim_tid != 0)
		(void) untimeout(tcps->tcps_reclaim_tid);
	mutex_destroy(&tcps->tcps_reclaim_lock);

	tcp_listener_conf_cleanup(tcps);

	for (i = 0; i < tcps->tcps_sc_cnt; i++)
		kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
	kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));

	kmem_free(tcps->tcps_propinfo_tbl,
	    tcp_propinfo_count * sizeof (mod_prop_info_t));
	tcps->tcps_propinfo_tbl = NULL;

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
	}

	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
	tcps->tcps_bind_fanout = NULL;

	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE);
	tcps->tcps_acceptor_fanout = NULL;

	mutex_destroy(&tcps->tcps_iss_key_lock);
	mutex_destroy(&tcps->tcps_epriv_port_lock);

	ip_drop_unregister(&tcps->tcps_dropper);

	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
	tcps->tcps_kstat = NULL;

	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
	tcps->tcps_mibkp = NULL;

	ldi_ident_release(tcps->tcps_ldi_ident);
	kmem_free(tcps, sizeof (*tcps));
}
/*
 * Generate ISS, taking into account that NDD changes may happen halfway
 * through.  (If the iss is not zero, set it.)
 */
static void
tcp_iss_init(tcp_t *tcp)
{
	MD5_CTX context;
	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
	uint32_t answer[4];
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcps->tcps_iss_incr_extra += (ISS_INCR >> 1);
	tcp->tcp_iss = tcps->tcps_iss_incr_extra;
	switch (tcps->tcps_strong_iss) {
	case 2:
		mutex_enter(&tcps->tcps_iss_key_lock);
		context = tcps->tcps_iss_key;
		mutex_exit(&tcps->tcps_iss_key_lock);
		arg.ports = connp->conn_ports;
		arg.src = connp->conn_laddr_v6;
		arg.dst = connp->conn_faddr_v6;
		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
		MD5Final((uchar_t *)answer, &context);
		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
		/*
		 * Now that we've hashed into a unique per-connection sequence
		 * space, add a random increment per strong_iss == 1.  So I
		 * guess we'll have to...
		 */
		/* FALLTHRU */
	case 1:
		tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
		break;
	default:
		tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
		break;
	}
	tcp->tcp_valid_bits = TCP_ISS_VALID;
	tcp->tcp_fss = tcp->tcp_iss - 1;
	tcp->tcp_suna = tcp->tcp_iss;
	tcp->tcp_snxt = tcp->tcp_iss + 1;
	tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
	tcp->tcp_csuna = tcp->tcp_snxt;
}
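
/*
 * Summary of the tcps_strong_iss settings handled in tcp_iss_init() above
 * (descriptive note; the meanings follow from the switch cases): 0 derives
 * the ISS from wall clock time alone (ISS_INCR per second plus the per-stack
 * increment), 1 additionally mixes in a high resolution timestamp and
 * tcp_random(), and 2 first hashes the connection 4-tuple with a per-stack
 * secret key (RFC 1948 style MD5) and then falls through to the same random
 * increment as 1.
 */
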
/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q.  Note, the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value which
 * when checked outside the q's lock is only an advisory check ...
 */
void
tcp_setqfull(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_setqfull(connp, &tcp->tcp_flow_stopped);
	if (tcp->tcp_flow_stopped)
		TCP_STAT(tcps, tcp_flwctl_on);
}

void
tcp_clrqfull(tcp_t *tcp)
{
	conn_t	*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_clrqfull(connp, &tcp->tcp_flow_stopped);
}
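
/*
 * Illustrative caller pattern (the thresholds below are examples only, not
 * taken from this file): the transmit path typically sets QFULL once unsent
 * data reaches the send buffer size and clears it again when the queue
 * drains, e.g.
 *
 *	if (!tcp->tcp_flow_stopped && tcp->tcp_unsent >= connp->conn_sndbuf)
 *		tcp_setqfull(tcp);
 *	else if (tcp->tcp_flow_stopped &&
 *	    tcp->tcp_unsent < connp->conn_sndlowat)
 *		tcp_clrqfull(tcp);
 *
 * As the block comment above notes, the unlocked tcp_flow_stopped test is
 * only advisory; conn_setqfull()/conn_clrqfull() make the real state change
 * under the queue's lock.
 */
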
static int
tcp_squeue_switch(int val)
{
	int rval = SQ_FILL;

	switch (val) {
	case 1:
		rval = SQ_NODRAIN;
		break;
	case 2:
		rval = SQ_PROCESS;
		break;
	default:
		break;
	}
	return (rval);
}

/*
 * This is called once for each squeue - globally for all stack
 * instances.
 */
static void
tcp_squeue_add(squeue_t *sqp)
{
	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
	    sizeof (tcp_squeue_priv_t), KM_SLEEP);

	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
	if (tcp_free_list_max_cnt == 0) {
		int tcp_ncpus = ((boot_max_ncpus == -1) ?
		    max_ncpus : boot_max_ncpus);

		/*
		 * Limit number of entries to 1% of available memory /
		 * tcp_ncpus
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
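		/*
		 * Worked example with assumed numbers: on a machine with
		 * 4GB of free memory, 8 CPUs and a tcp_t of roughly 1KB,
		 * this works out to 4G / (8 * 1K * 100), i.e. about 5000
		 * cached tcp_t's per squeue free list.
		 */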
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}

/*
 * Return the unix error if the TLI error is TSYSERR; otherwise return a
 * negative TLI error.
 */
int
tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
    boolean_t bind_to_req_port_only)
{
	int	error;
	tcp_t	*tcp = connp->conn_tcp;

	if (tcp->tcp_state >= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_bind: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}

	error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
	if (error != 0)
		return (error);

	ASSERT(tcp->tcp_state == TCPS_BOUND);
	tcp->tcp_conn_req_max = 0;
	return (0);
}
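
/*
 * Example of the convention used above: calling bind on an already bound
 * endpoint fails with -TOUTSTATE (a negated TLI error), while failures
 * detected inside tcp_bind_check(), such as the requested port being in
 * use, come back as positive UNIX errnos (e.g. EADDRINUSE) and are passed
 * through unchanged.
 */
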
/*
 * If the return value from this function is positive, it's a UNIX error.
 * Otherwise, if it's negative, then the absolute value is a TLI error.
 * The TPI routine tcp_tpi_connect() is a wrapper function for this.
 */
int
tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
    cred_t *cr, pid_t pid)
{
	tcp_t		*tcp = connp->conn_tcp;
	sin_t		*sin = (sin_t *)sa;
	sin6_t		*sin6 = (sin6_t *)sa;
	ipaddr_t	*dstaddrp;
	in_port_t	dstport;
	uint_t		srcid;
	int		error;
	uint32_t	mss;
	mblk_t		*syn_mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;

	oldstate = tcp->tcp_state;

	switch (len) {
	default:
		/*
		 * Should never happen
		 */
		return (EINVAL);

	case sizeof (sin_t):
		sin = (sin_t *)sa;
		if (sin->sin_port == 0) {
			return (-TBADADDR);
		}
		if (connp->conn_ipv6_v6only) {
			return (EAFNOSUPPORT);
		}
		break;

	case sizeof (sin6_t):
		sin6 = (sin6_t *)sa;
		if (sin6->sin6_port == 0) {
			return (-TBADADDR);
		}
		break;
	}

	/*
	 * If we're connecting to an IPv4-mapped IPv6 address, we need to
	 * make sure that the conn_ipversion is IPV4_VERSION.  We need to
	 * do this before we call tcp_bindi() so that the port lookup
	 * code will look for ports in the correct port space (IPv4 and
	 * IPv6 have separate port spaces).
	 */
	if (connp->conn_family == AF_INET6 &&
	    connp->conn_ipversion == IPV6_VERSION &&
	    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		if (connp->conn_ipv6_v6only)
			return (EADDRNOTAVAIL);

		connp->conn_ipversion = IPV4_VERSION;
	}
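	/*
	 * For example (addresses for illustration only): a connect() on an
	 * AF_INET6 socket to ::ffff:192.0.2.1 is really an IPv4 connection,
	 * so unless IPV6_V6ONLY is set the check above switches
	 * conn_ipversion to IPV4_VERSION and the subsequent port selection
	 * happens in the IPv4 port space.
	 */
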
	switch (tcp->tcp_state) {
	case TCPS_LISTEN:
		/*
		 * Listening sockets are not allowed to issue connect().
		 */
		if (IPCL_IS_NONSTR(connp))
			return (EOPNOTSUPP);
		/* FALLTHRU */
	case TCPS_IDLE:
		/*
		 * We support quick connect, refer to comments in
		 * tcp_connect_*()
		 */
		/* FALLTHRU */
	case TCPS_BOUND:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * We update our cred/cpid based on the caller of connect
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in the ixa without any refhold */
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	if (connp->conn_family == AF_INET6) {
		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
			    sin6->sin6_port, sin6->sin6_flowinfo,
			    sin6->__sin6_src_id, sin6->sin6_scope_id);
		} else {
			/*
			 * Destination address is mapped IPv6 address.
			 * Source bound address should be unspecified or
			 * IPv6 mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &connp->conn_bound_addr_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(
			    &connp->conn_bound_addr_v6)) {
				return (EADDRNOTAVAIL);
			}
			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
			dstport = sin6->sin6_port;
			srcid = sin6->__sin6_src_id;
			error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
			    srcid);
		}
	} else {
		dstaddrp = &sin->sin_addr.s_addr;
		dstport = sin->sin_port;
		srcid = 0;
		error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
	}

	if (error != 0)
		goto connect_failed;

	CL_INET_CONNECT(connp, B_TRUE, error);
	if (error != 0)
		goto connect_failed;

	/* connect succeeded */
	TCPS_BUMP_MIB(tcps, tcpActiveOpens);
	tcp->tcp_active_open = 1;

	/*
	 * tcp_set_destination() does not adjust for TCP/IP header length.
	 */
	mss = tcp->tcp_mss - connp->conn_ht_iphc_len;

	/*
	 * Just make sure our rwnd is at least tcps_recv_hiwat_minmss * MSS
	 * large, and round up to the nearest MSS.
	 *
	 * We do the round up here because we need to get the interface MTU
	 * first before we can do the round up.
	 */
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
	    tcps->tcps_recv_hiwat_minmss * mss);
	connp->conn_rcvbuf = tcp->tcp_rwnd;
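	/*
	 * Worked example with assumed values: with conn_rcvbuf at 128KB and
	 * an effective mss of 1460 bytes, MSS_ROUNDUP() raises the window to
	 * the next multiple of 1460 (90 segments, 131400 bytes); the
	 * tcps_recv_hiwat_minmss * mss term only takes over when the
	 * configured receive buffer is smaller than that minimum number of
	 * segments.
	 */
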
	tcp_set_ws_value(tcp);
	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
		tcp->tcp_snd_ws_ok = B_TRUE;

	/*
	 * Set tcp_snd_ts_ok to true
	 * so that tcp_xmit_mp will
	 * include the timestamp
	 * option in the SYN segment.
	 */
	if (tcps->tcps_tstamp_always ||
	    (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
		tcp->tcp_snd_ts_ok = B_TRUE;
	}

	/*
	 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
	 * the SACK metric is set.  So here we just check the per stack SACK
	 * permitted param.
	 */
	if (tcps->tcps_sack_permitted == 2) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_TRUE;
	}

	/*
	 * Should we use ECN?  Note that the current
	 * default value (SunOS 5.9) of tcp_ecn_permitted
	 * is 1.  The reason for doing this is that there
	 * is equipment out there that will drop ECN
	 * enabled IP packets.  Setting it to 1 avoids
	 * compatibility problems.
	 */
	if (tcps->tcps_ecn_permitted == 2)
		tcp->tcp_ecn_ok = B_TRUE;
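	/*
	 * The other tcps_ecn_permitted settings, for reference: 0 disables
	 * ECN entirely, 1 (the compatibility default discussed above) lets
	 * ECN be negotiated on passive connections without requesting it on
	 * active opens, and 2 also requests ECN here on active opens.
	 */
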
	/* Trace change from BOUND -> SYN_SENT here */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, TCPS_BOUND);

	TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
	syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
	    tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (syn_mp != NULL) {
		/*
		 * We must bump the generation before sending the syn
		 * to ensure that we use the right generation in case
		 * this thread issues a "connected" up call.
		 */
		SOCK_CONNID_BUMP(tcp->tcp_connid);
		/*
		 * DTrace sending the first SYN as a
		 * tcp:::connect-request event.
		 */
		DTRACE_TCP5(connect__request, mblk_t *, NULL,
		    ip_xmit_attr_t *, connp->conn_ixa,
		    void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
		    tcph_t *,
		    &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
		tcp_send_data(tcp, syn_mp);
	}

	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (0);

connect_failed:
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	tcp->tcp_state = oldstate;
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (error);
}

int
tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
    int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
{
	tcp_t		*tcp = connp->conn_tcp;
	int		error = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (tcp->tcp_state >= TCPS_BOUND) {
		if ((tcp->tcp_state == TCPS_BOUND ||
		    tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
			/*
			 * Handle listen() increasing backlog.
			 * This is more "liberal" than what the TPI spec
			 * requires but is needed to avoid a t_unbind
			 * when handling listen() since the port number
			 * might be "stolen" between the unbind and bind.
			 */
			goto do_listen;
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_listen: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	} else {
		if (sa == NULL) {
			sin6_t	addr;
			sin_t	*sin;
			sin6_t	*sin6;

			ASSERT(IPCL_IS_NONSTR(connp));
			/* Do an implicit bind: Request for a generic port. */
			if (connp->conn_family == AF_INET) {
				len = sizeof (sin_t);
				sin = (sin_t *)&addr;
				*sin = sin_null;
				sin->sin_family = AF_INET;
			} else {
				ASSERT(connp->conn_family == AF_INET6);
				len = sizeof (sin6_t);
				sin6 = (sin6_t *)&addr;
				*sin6 = sin6_null;
				sin6->sin6_family = AF_INET6;
			}
			sa = (struct sockaddr *)&addr;
		}

		error = tcp_bind_check(connp, sa, len, cr,
		    bind_to_req_port_only);
		if (error)
			return (error);
		/* Fall through and do the fanout insertion */
	}

do_listen:
	ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
	tcp->tcp_conn_req_max = backlog;
	if (tcp->tcp_conn_req_max) {
		if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
		if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;

		/*
		 * If this is a listener, do not reset the eager list
		 * and other stuff.  Note that we don't check if the
		 * existing eager list meets the new tcp_conn_req_max
		 * requirement.
		 */
		if (tcp->tcp_state != TCPS_LISTEN) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp,
			    void, NULL, int32_t, TCPS_BOUND);
			/* Initialize the chain. Don't need the eager_lock */
			tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
			tcp->tcp_eager_next_drop_q0 = tcp;
			tcp->tcp_eager_prev_drop_q0 = tcp;
			tcp->tcp_second_ctimer_threshold =
			    tcps->tcps_ip_abort_linterval;
		}
	}

	/*
	 * We need to make sure that the conn_recv is set to a non-null
	 * value before we insert the conn into the classifier table.
	 * This is to avoid a race with an incoming packet which does an
	 * ipcl_classify().
	 * We initially set it to tcp_input_listener_unbound to try to
	 * pick a good squeue for the listener when the first SYN arrives.
	 * tcp_input_listener_unbound sets it to tcp_input_listener on that
	 * first SYN.
	 */
	connp->conn_recv = tcp_input_listener_unbound;

	/* Insert the listener in the classifier table */
	error = ip_laddr_fanout_insert(connp);
	if (error != 0) {
		/* Undo the bind - release the port number */
		oldstate = tcp->tcp_state;
		tcp->tcp_state = TCPS_IDLE;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, oldstate);
		connp->conn_bound_addr_v6 = ipv6_all_zeros;

		connp->conn_laddr_v6 = ipv6_all_zeros;
		connp->conn_saddr_v6 = ipv6_all_zeros;
		connp->conn_ports = 0;

		if (connp->conn_anon_port) {
			zone_t	*zone;

			zone = crgetzone(cr);
			connp->conn_anon_port = B_FALSE;
			(void) tsol_mlp_anon(zone, connp->conn_mlp_type,
			    connp->conn_proto, connp->conn_lport, B_FALSE);
		}
		connp->conn_mlp_type = mlptSingle;

		tcp_bind_hash_remove(tcp);
		return (error);
	}

	/*
	 * If there is a connection limit, allocate and initialize
	 * the counter struct.  Note that since listen can be called
	 * multiple times, the struct may have been already allocated.
	 */
	if (!list_is_empty(&tcps->tcps_listener_conf) &&
	    tcp->tcp_listen_cnt == NULL) {
		tcp_listen_cnt_t *tlc;
		uint32_t ratio;

		ratio = tcp_find_listener_conf(tcps,
		    ntohs(connp->conn_lport));
		if (ratio != 0) {
			uint32_t mem_ratio, tot_buf;

			tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
			    KM_SLEEP);
			/*
			 * Calculate the connection limit based on
			 * the configured ratio and maxusers.  Maxusers
			 * is calculated based on memory size,
			 * ~ 1 user per MB.  Note that the conn_rcvbuf
			 * and conn_sndbuf may change after a
			 * connection is accepted.  So what we have
			 * is only an approximation.
			 */
			if ((tot_buf = connp->conn_rcvbuf +
			    connp->conn_sndbuf) < MB) {
				mem_ratio = MB / tot_buf;
				tlc->tlc_max = maxusers / ratio *
				    mem_ratio;
			} else {
				mem_ratio = tot_buf / MB;
				tlc->tlc_max = maxusers / ratio /
				    mem_ratio;
			}
			/* At least we should allow two connections! */
			if (tlc->tlc_max <= tcp_min_conn_listener)
				tlc->tlc_max = tcp_min_conn_listener;
			tlc->tlc_cnt = 1;

			tcp->tcp_listen_cnt = tlc;
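			/*
			 * Worked example with assumed numbers: maxusers of
			 * 2048 (roughly a 2GB system), a configured ratio of
			 * 2 and conn_rcvbuf + conn_sndbuf totalling 256KB
			 * give mem_ratio = MB / 256KB = 4, so this listener
			 * would be limited to 2048 / 2 * 4 = 4096
			 * connections.
			 */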