/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 Joyent, Inc. All rights reserved.
 */
/* This file contains all TCP input processing functions. */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>
/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */
#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif
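/*
 * Illustration (not part of the original source): on the wire the aligned
 * timestamp option reads NOP (1), NOP (1), kind 8 (TSTAMP), length 10.
 * Loaded as a single 32-bit word those four bytes match the big-endian
 * definition directly; on a little-endian machine the bytes arrive
 * reversed, which is what the second definition spells out.
 */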
/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16
/*
 * PAWS needs a timer for 24 days.  This is the number of ticks in 24 days.
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))
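/*
 * For example (illustrative arithmetic): with hz = 100 this works out to
 * 24 * 24 * 60 * 60 * 100 = 207,360,000 ticks, which still fits in a
 * signed 32-bit clock_t.
 */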
/*
 * Since tcp_listener is not cleared atomically with tcp_detached
 * being cleared we need this extra bit to tell a detached connection
 * apart from one that is in the process of being accepted.
 */
#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
	(TCP_IS_DETACHED(tcp) &&	\
	    (!(tcp)->tcp_hard_binding))
/*
 * Steps to do when a tcp_t moves to TIME-WAIT state.
 *
 * This connection is done, we don't need to account for it.  Decrement
 * the listener connection counter if needed.
 *
 * Decrement the connection counter of the stack.  Note that this counter
 * is per CPU.  So the total number of connections in a stack is the sum of
 * all of them.  Since there is no lock for handling all of them exclusively,
 * the resulting sum is only an approximation.
 *
 * Unconditionally clear the exclusive binding bit so this TIME-WAIT
 * connection won't interfere with new ones.
 *
 * Start the TIME-WAIT timer.  If the upper layer has not closed the
 * connection, the timer is handled within the context of this tcp_t.  When
 * the timer fires, tcp_clean_death() is called.  If the upper layer closes
 * the connection during this period, tcp_time_wait_append() will be called
 * to add this tcp_t to the global TIME-WAIT list.  Note that this means that
 * the actual wait time in TIME-WAIT state will be longer than the
 * tcps_time_wait_interval since the period before the upper layer closes the
 * connection is not accounted for when tcp_time_wait_append() is called.
 *
 * If the upper layer has closed the connection, call tcp_time_wait_append()
 * directly.
 */
#define	SET_TIME_WAIT(tcps, tcp, connp)				\
{								\
	(tcp)->tcp_state = TCPS_TIME_WAIT;			\
	if ((tcp)->tcp_listen_cnt != NULL)			\
		TCP_DECR_LISTEN_CNT(tcp);			\
	atomic_dec_64(						\
	    (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \
	(connp)->conn_exclbind = 0;				\
	if (!TCP_IS_DETACHED(tcp)) {				\
		TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \
	} else {						\
		tcp_time_wait_append(tcp);			\
		TCP_DBGSTAT(tcps, tcp_rput_time_wait);		\
	}							\
}
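/*
 * Usage sketch (illustrative, not quoted from any caller): code that has
 * just decided a connection is done would simply do
 *
 *	SET_TIME_WAIT(tcps, tcp, connp);
 *
 * with all three arguments referring to the same connection, typically
 * from within that connection's squeue.
 */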
/*
 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
 * than tcp_drop_ack_unsent_cnt ACKs which acknowledge unsent data, TCP
 * will not respond with an ACK.  RFC 793 requires that TCP respond with
 * an ACK for such a bogus ACK.  By not following the RFC, we prevent TCP
 * from getting into an ACK storm if somehow an attacker successfully
 * spoofs an acceptable segment to our peer; or when our peer is "confused."
 */
static uint32_t tcp_drop_ack_unsent_cnt = 10;
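/*
 * Illustrative reading of the default: if a spoofed segment tricks our
 * peer into ACKing data we never sent, we answer roughly the first ten
 * such ACKs as RFC 793 requires and then go silent, so the two ends
 * cannot ping-pong ACKs at line rate indefinitely.
 */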
/*
 * To protect TCP against an attacker using a small window and requesting
 * a large amount of data (DoS attack by consuming memory), TCP checks the
 * window advertised in the last ACK of the 3-way handshake.  TCP uses
 * the tcp_mss (the size of one packet) value for comparison.  The window
 * should be larger than tcp_mss.  But while a sane TCP should advertise
 * a receive window larger than or equal to 4*MSS to avoid stop-and-go
 * traffic, not all TCP stacks do that.  This is especially true when
 * tcp_mss is a big value.
 *
 * To work around this issue, an additional fixed value for comparison
 * is also used.  If the advertised window is smaller than both tcp_mss
 * and tcp_init_wnd_chk, the ACK is considered invalid.  So for a large
 * tcp_mss value (say, 8K), a window larger than tcp_init_wnd_chk but
 * smaller than 8K is considered to be OK.
 */
static uint32_t tcp_init_wnd_chk = 4096;
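/*
 * Worked example (illustrative numbers): with tcp_mss = 8760, a peer
 * advertising a 4096-byte window in the final ACK passes the check
 * because the window is not smaller than tcp_init_wnd_chk, even though
 * it is below one MSS.  A 1024-byte window fails both comparisons and
 * the ACK is treated as invalid.
 */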
/* Process ICMP source quench message or not. */
static boolean_t tcp_icmp_source_quench = B_FALSE;

static boolean_t tcp_outbound_squeue_switch = B_FALSE;
static mblk_t	*tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *,
		    ip_recv_attr_t *);
static mblk_t	*tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *,
		    ip_recv_attr_t *);
static boolean_t	tcp_drop_q0(tcp_t *);
static void	tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
static mblk_t	*tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *,
		    ip_recv_attr_t *);
static void	tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *);
static int	tcp_parse_options(tcpha_t *, tcp_opt_t *);
static void	tcp_process_options(tcp_t *, tcpha_t *);
static mblk_t	*tcp_reass(tcp_t *, mblk_t *, uint32_t);
static void	tcp_reass_elim_overlap(tcp_t *, mblk_t *);
static void	tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void	tcp_set_rto(tcp_t *, time_t);
static void	tcp_setcred_data(mblk_t *, ip_recv_attr_t *);
/*
 * Set the MSS associated with a particular tcp based on its current value,
 * and a new one passed in.  Observe minimums and maximums, and reset other
 * state variables that we want to view as multiples of MSS.
 *
 * The value of MSS could be either increased or decreased.
 */
void
tcp_mss_set(tcp_t *tcp, uint32_t mss)
{
	uint32_t	mss_max;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;
	/*
	 * Unless naglim has been set by our client to
	 * a non-mss value, force naglim to track mss.
	 * This can help to aggregate small writes.
	 */
	if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
		tcp->tcp_naglim = mss;
	/*
	 * TCP should be able to buffer at least 4 MSS data for obvious
	 * performance reasons.
	 */
	if ((mss << 2) > connp->conn_sndbuf)
		connp->conn_sndbuf = mss << 2;

	/*
	 * Set the send lowater to at least twice the MSS.
	 */
	if ((mss << 1) > connp->conn_sndlowat)
		connp->conn_sndlowat = mss << 1;

	/*
	 * Update tcp_cwnd according to the new value of MSS.  Keep the
	 * previous ratio to preserve the transmit rate.
	 */
	tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
	tcp->tcp_cwnd_cnt = 0;

	tcp->tcp_mss = mss;
	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
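/*
 * Worked example (hypothetical numbers): a connection with tcp_mss = 1460
 * and tcp_cwnd = 14600 (ten segments) that switches to a smaller MSS of
 * 1220 ends up with tcp_cwnd = (14600 / 1460) * 1220 = 12200.  The window
 * still holds ten segments, so the transmit rate in segments is preserved
 * even though the byte count shrinks.
 */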
/*
 * Extract option values from a tcp header.  We put any found values into the
 * tcpopt struct and return a bitmask saying which options were found.
 */
static int
tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
{
	uchar_t		*endp;
	int		len;
	uint32_t	mss;
	uchar_t		*up = (uchar_t *)tcpha;
	int		found = 0;
	int32_t		sack_len;
	tcp_seq		sack_begin, sack_end;
	tcp_t		*tcp;

	endp = up + TCP_HDR_LENGTH(tcpha);
	up += TCP_MIN_HEADER_LENGTH;
	while (up < endp) {
		len = endp - up;
		switch (*up) {
		case TCPOPT_EOL:
			break;

		case TCPOPT_NOP:
			up++;
			continue;

		case TCPOPT_MAXSEG:
			if (len < TCPOPT_MAXSEG_LEN ||
			    up[1] != TCPOPT_MAXSEG_LEN)
				break;

			mss = BE16_TO_U16(up+2);
			/* Caller must handle tcp_mss_min and tcp_mss_max_* */
			tcpopt->tcp_opt_mss = mss;
			found |= TCP_OPT_MSS_PRESENT;

			up += TCPOPT_MAXSEG_LEN;
			continue;

		case TCPOPT_WSCALE:
			if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
				break;

			if (up[2] > TCP_MAX_WINSHIFT)
				tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
			else
				tcpopt->tcp_opt_wscale = up[2];
			found |= TCP_OPT_WSCALE_PRESENT;

			up += TCPOPT_WS_LEN;
			continue;

		case TCPOPT_SACK_PERMITTED:
			if (len < TCPOPT_SACK_OK_LEN ||
			    up[1] != TCPOPT_SACK_OK_LEN)
				break;
			found |= TCP_OPT_SACK_OK_PRESENT;
			up += TCPOPT_SACK_OK_LEN;
			continue;

		case TCPOPT_SACK:
			if (len <= 2 || up[1] <= 2 || len < up[1])
				break;

			/* If TCP is not interested in SACK blks... */
			if ((tcp = tcpopt->tcp) == NULL) {
				up += up[1];
				continue;
			}
			sack_len = up[1] - TCPOPT_HEADER_LEN;
			up += TCPOPT_HEADER_LEN;

			/*
			 * If the list is empty, allocate one and assume
			 * nothing is sack'ed.
			 */
			if (tcp->tcp_notsack_list == NULL) {
				tcp_notsack_update(&(tcp->tcp_notsack_list),
				    tcp->tcp_suna, tcp->tcp_snxt,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));

				/*
				 * Make sure tcp_notsack_list is not NULL.
				 * This happens when kmem_alloc(KM_NOSLEEP)
				 * returns NULL.
				 */
				if (tcp->tcp_notsack_list == NULL) {
					continue;
				}
				tcp->tcp_fack = tcp->tcp_suna;
			}

			while (sack_len > 0) {
				if (up + 8 > endp) {
					up = endp;
					break;
				}
				sack_begin = BE32_TO_U32(up);
				up += 4;
				sack_end = BE32_TO_U32(up);
				up += 4;
				sack_len -= 8;
				/*
				 * Bounds checking.  Make sure the SACK
				 * info is within tcp_suna and tcp_snxt.
				 * If this SACK blk is out of bound, ignore
				 * it but continue to parse the following
				 * blks.
				 */
				if (SEQ_LEQ(sack_end, sack_begin) ||
				    SEQ_LT(sack_begin, tcp->tcp_suna) ||
				    SEQ_GT(sack_end, tcp->tcp_snxt)) {
					continue;
				}
				tcp_notsack_insert(&(tcp->tcp_notsack_list),
				    sack_begin, sack_end,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));
				if (SEQ_GT(sack_end, tcp->tcp_fack)) {
					tcp->tcp_fack = sack_end;
				}
			}
			found |= TCP_OPT_SACK_PRESENT;
			continue;

		case TCPOPT_TSTAMP:
			if (len < TCPOPT_TSTAMP_LEN ||
			    up[1] != TCPOPT_TSTAMP_LEN)
				break;

			tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
			tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);

			found |= TCP_OPT_TSTAMP_PRESENT;

			up += TCPOPT_TSTAMP_LEN;
			continue;

		default:
			if (len <= 1 || len < (int)up[1] || up[1] == 0)
				break;
			up += up[1];
			continue;
		}
		break;
	}
	return (found);
}
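/*
 * Example (illustrative, not from the original source): a SYN carrying
 * "MSS 1460, NOP, WSCALE 7, SACK-permitted, TSTAMP" lays out as
 *
 *	02 04 05 b4  01 03 03 07  04 02 08 0a  <8 timestamp bytes>
 *
 * for which tcp_parse_options() returns TCP_OPT_MSS_PRESENT |
 * TCP_OPT_WSCALE_PRESENT | TCP_OPT_SACK_OK_PRESENT |
 * TCP_OPT_TSTAMP_PRESENT, with tcp_opt_mss = 1460 and tcp_opt_wscale = 7.
 */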
/*
 * Process all TCP options in the SYN segment.  Note that this function should
 * be called after tcp_set_destination() is called so that the necessary info
 * from IRE is already set in the tcp structure.
 *
 * This function sets up the correct tcp_mss value according to the
 * MSS option value and our header size.  It also sets up the window scale
 * and timestamp values, and initializes SACK info blocks.  But it does not
 * change the receive window size after setting the tcp_mss value.  The
 * caller should do the appropriate change.
 */
static void
tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
{
	int		options;
	tcp_opt_t	tcpopt;
	uint32_t	mss_max;
	char		*tmp_tcph;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcpopt.tcp = NULL;
	options = tcp_parse_options(tcpha, &tcpopt);

	/*
	 * Process MSS option.  Note that MSS option value does not account
	 * for IP or TCP options.  This means that it is equal to MTU - minimum
	 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
	 * IPv6.
	 */
	if (!(options & TCP_OPT_MSS_PRESENT)) {
		if (connp->conn_ipversion == IPV4_VERSION)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
		else
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
	} else {
		if (connp->conn_ipversion == IPV4_VERSION)
			mss_max = tcps->tcps_mss_max_ipv4;
		else
			mss_max = tcps->tcps_mss_max_ipv6;
		if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
		else if (tcpopt.tcp_opt_mss > mss_max)
			tcpopt.tcp_opt_mss = mss_max;
	}

	/* Process Window Scale option. */
	if (options & TCP_OPT_WSCALE_PRESENT) {
		tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
		tcp->tcp_snd_ws_ok = B_TRUE;
	} else {
		tcp->tcp_snd_ws = B_FALSE;
		tcp->tcp_snd_ws_ok = B_FALSE;
		tcp->tcp_rcv_ws = B_FALSE;
	}

	/* Process Timestamp option. */
	if ((options & TCP_OPT_TSTAMP_PRESENT) &&
	    (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
		tmp_tcph = (char *)tcp->tcp_tcpha;

		tcp->tcp_snd_ts_ok = B_TRUE;
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
		tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
		ASSERT(OK_32PTR(tmp_tcph));
		ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);

		/* Fill in our template header with basic timestamp option. */
		tmp_tcph += connp->conn_ht_ulp_len;
		tmp_tcph[0] = TCPOPT_NOP;
		tmp_tcph[1] = TCPOPT_NOP;
		tmp_tcph[2] = TCPOPT_TSTAMP;
		tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
		connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
		connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
		tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
	} else {
		tcp->tcp_snd_ts_ok = B_FALSE;
	}

	/*
	 * Process SACK options.  If SACK is enabled for this connection,
	 * then allocate the SACK info structure.  Note the following places
	 * where tcp_snd_sack_ok is set to true:
	 *
	 * For active connection: in tcp_set_destination() called in
	 * tcp_connect().
	 *
	 * For passive connection: in tcp_set_destination() called in
	 * tcp_input_listener().
	 *
	 * That's the reason why the extra TCP_IS_DETACHED() check is there.
	 * That check makes sure that if we did not send a SACK OK option,
	 * we will not enable SACK for this connection even though the other
	 * side sends us the SACK OK option.  For an active connection, the
	 * SACK info structure has already been allocated.  So we need to
	 * free it if SACK is disabled.
	 */
	if ((options & TCP_OPT_SACK_OK_PRESENT) &&
	    (tcp->tcp_snd_sack_ok ||
	    (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);

		tcp->tcp_snd_sack_ok = B_TRUE;
		if (tcp->tcp_snd_ts_ok) {
			tcp->tcp_max_sack_blk = 3;
		} else {
			tcp->tcp_max_sack_blk = 4;
		}
	} else if (tcp->tcp_snd_sack_ok) {
		/*
		 * Resetting tcp_snd_sack_ok to B_FALSE so that
		 * no SACK info will be used for this
		 * connection.  This assumes that SACK usage
		 * permission is negotiated.  This may need
		 * to be changed once this is clarified.
		 */
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_FALSE;
	}
	/*
	 * Now we know the exact TCP/IP header length, subtract
	 * that from tcp_mss to get our side's MSS.
	 */
	tcp->tcp_mss -= connp->conn_ht_iphc_len;

	/*
	 * Here we assume that the other side's header size will be equal to
	 * our header size.  We calculate the real MSS accordingly.  We need
	 * to take into account the additional overhead IPsec puts in.
	 *
	 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
	 */
	tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
	    tcp->tcp_ipsec_overhead -
	    ((connp->conn_ipversion == IPV4_VERSION ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);
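	/*
	 * Worked example (illustrative numbers): for IPv4 with the timestamp
	 * option enabled, conn_ht_iphc_len is 20 + 20 + 12 = 52 bytes, so
	 * with no IPsec overhead a peer MSS of 1460 becomes
	 * 1460 - (52 - 40) = 1448 usable payload bytes per segment.
	 */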
	/*
	 * Set MSS to the smaller one of both ends of the connection.
	 * We should not have called tcp_mss_set() before, but our
	 * side of the MSS should have been set to a proper value
	 * by tcp_set_destination().  tcp_mss_set() will also set up the
	 * STREAM head parameters properly.
	 *
	 * If we have a larger-than-16-bit window but the other side
	 * didn't want to do window scale, tcp_rwnd_set() will take
	 * care of that.
	 */
	tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));

	/*
	 * Initialize tcp_cwnd value.  After tcp_mss_set(), tcp_mss has been
	 * updated properly.
	 */
	TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
}
/*
 * Add a new piece to the tcp reassembly queue.  If the gap at the beginning
 * is filled, return as much as we can.  The message passed in may be
 * multi-part, chained using b_cont.  "start" is the starting sequence
 * number for this piece.
 */
static mblk_t *
tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
{
	uint32_t	end;
	mblk_t		*mp1;
	mblk_t		*mp2;
	mblk_t		*next_mp;
	uint32_t	u1;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	/* Walk through all the new pieces. */
	do {
		ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
		    (uintptr_t)INT_MAX);
		end = start + (int)(mp->b_wptr - mp->b_rptr);
		next_mp = mp->b_cont;
		if (start == end) {
			/* Empty.  Blast it. */
			freeb(mp);
			continue;
		}
		mp->b_cont = NULL;
		TCP_REASS_SET_SEQ(mp, start);
		TCP_REASS_SET_END(mp, end);
		mp1 = tcp->tcp_reass_tail;
		if (!mp1) {
			tcp->tcp_reass_tail = mp;
			tcp->tcp_reass_head = mp;
			TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
			    end - start);
			continue;
		}
		/* New stuff completely beyond tail? */
		if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
			/* Link it on end. */
			mp1->b_cont = mp;
			tcp->tcp_reass_tail = mp;
			TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
			    end - start);
			continue;
		}
		mp1 = tcp->tcp_reass_head;
		u1 = TCP_REASS_SEQ(mp1);
		/* New stuff at the front? */
		if (SEQ_LT(start, u1)) {
			/* Yes... Check for overlap. */
			mp->b_cont = mp1;
			tcp->tcp_reass_head = mp;
			tcp_reass_elim_overlap(tcp, mp);
			continue;
		}
		/*
		 * The new piece fits somewhere between the head and tail.
		 * We find our slot, where mp1 precedes us and mp2 trails.
		 */
		for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
			u1 = TCP_REASS_SEQ(mp2);
			if (SEQ_LEQ(start, u1))
				break;
		}
		/* Link ourselves in */
		mp->b_cont = mp2;
		mp1->b_cont = mp;

		/* Trim overlap with following mblk(s) first */
		tcp_reass_elim_overlap(tcp, mp);

		/* Trim overlap with preceding mblk */
		tcp_reass_elim_overlap(tcp, mp1);

	} while (start = end, mp = next_mp);
	mp1 = tcp->tcp_reass_head;
	/* Anything ready to go? */
	if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
		return (NULL);
	/* Eat what we can off the queue */
	for (;;) {
		mp = mp1->b_cont;
		end = TCP_REASS_END(mp1);
		TCP_REASS_SET_SEQ(mp1, 0);
		TCP_REASS_SET_END(mp1, 0);
		if (!mp) {
			tcp->tcp_reass_tail = NULL;
			break;
		}
		if (end != TCP_REASS_SEQ(mp)) {
			mp1->b_cont = NULL;
			break;
		}
		mp1 = mp;
	}
	mp1 = tcp->tcp_reass_head;
	tcp->tcp_reass_head = mp;
	return (mp1);
}
/* Eliminate any overlap that mp may have over later mblks */
static void
tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
{
	uint32_t	end;
	mblk_t		*mp1;
	uint32_t	u1;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	end = TCP_REASS_END(mp);
	while ((mp1 = mp->b_cont) != NULL) {
		u1 = TCP_REASS_SEQ(mp1);
		if (!SEQ_GT(end, u1))
			break;
		if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
			mp->b_wptr -= end - u1;
			TCP_REASS_SET_END(mp, u1);
			TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes,
			    end - u1);
			break;
		}
		mp->b_cont = mp1->b_cont;
		TCP_REASS_SET_SEQ(mp1, 0);
		TCP_REASS_SET_END(mp1, 0);
		freeb(mp1);
		TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
		TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1);
	}
	if (!mp1)
		tcp->tcp_reass_tail = mp;
}
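/*
 * Example (illustrative sequence numbers): if mp covers [100, 250) and the
 * mblk after it covers [200, 300), end (250) overlaps the follower by 50
 * bytes, so mp is trimmed back to [100, 200) and the partial-duplicate
 * counters are bumped by 50.  A follower that mp covers completely is
 * instead unlinked and freed as a full duplicate.
 */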
/*
 * This function does PAWS protection check.  Returns B_TRUE if the
 * segment passes the PAWS test, else returns B_FALSE.
 */
boolean_t
tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
{
	uint8_t	flags;
	int	options;
	uint8_t *up;
	conn_t	*connp = tcp->tcp_connp;

	flags = (unsigned int)tcpha->tha_flags & 0xFF;
	/*
	 * If the timestamp option is aligned nicely, get values inline,
	 * otherwise call the general routine to parse.  Only do that
	 * if the timestamp is the only option.
	 */
	if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
	    TCPOPT_REAL_TS_LEN &&
	    OK_32PTR((up = ((uint8_t *)tcpha) +
	    TCP_MIN_HEADER_LENGTH)) &&
	    *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
		tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
		tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));

		options = TCP_OPT_TSTAMP_PRESENT;
	} else {
		if (tcp->tcp_snd_sack_ok) {
			tcpoptp->tcp = tcp;
		} else {
			tcpoptp->tcp = NULL;
		}
		options = tcp_parse_options(tcpha, tcpoptp);
	}

	if (options & TCP_OPT_TSTAMP_PRESENT) {
		/*
		 * Do PAWS per RFC 1323 section 4.2.  Accept RST
		 * regardless of the timestamp, page 18 RFC 1323.bis.
		 */
		if ((flags & TH_RST) == 0 &&
		    TSTMP_LT(tcpoptp->tcp_opt_ts_val,
		    tcp->tcp_ts_recent)) {
			if (LBOLT_FASTPATH64 <
			    (tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
				/* This segment is not acceptable. */
				return (B_FALSE);
			} else {
				/*
				 * Connection has been idle for
				 * too long.  Reset the timestamp
				 * and assume the segment is valid.
				 */
				tcp->tcp_ts_recent =
				    tcpoptp->tcp_opt_ts_val;
			}
		}
	} else {
		/*
		 * If we don't get a timestamp on every packet, we
		 * figure we can't really trust 'em, so we stop sending
		 * timestamps.
		 */
		tcp->tcp_snd_ts_ok = B_FALSE;

		connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN;
		connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN;
		tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4);
		/*
		 * Adjust the tcp_mss and tcp_cwnd accordingly.  We avoid
		 * doing a slow start here so as not to lose on the
		 * transfer rate built up so far.
		 */
		tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN);
		if (tcp->tcp_snd_sack_ok)
			tcp->tcp_max_sack_blk = 4;
	}
	return (B_TRUE);
}
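/*
 * Example (illustrative values): if tcp_ts_recent is 5000 and a non-RST
 * segment arrives carrying timestamp 4000, it fails the PAWS test and is
 * dropped, provided the connection has been idle for less than
 * PAWS_TIMEOUT; after 24 idle days the stale timestamp is simply adopted
 * and the segment is accepted.
 */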
/*
 * Defense for the SYN attack -
 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
 *    one from the list of droppable eagers.  This list is a subset of q0.
 *    See comments before the definition of MAKE_DROPPABLE().
 * 2. Don't drop a SYN request before its first timeout.  This gives every
 *    request at least until the first timeout to complete its 3-way
 *    handshake.
 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
 *    requests currently on the queue have timed out.  This will be used
 *    as an indicator of whether an attack is under way, so that appropriate
 *    actions can be taken.  (It's incremented in tcp_timer() and decremented
 *    either when the eager goes into ESTABLISHED, or gets freed up.)
 * 4. The current threshold is: # of timeouts > q0len/4 => SYN alert on;
 *    # of timeouts drops back to <= q0len/32 => SYN alert off
 *    (see the example below).
 */
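/*
 * For example (illustrative): with tcps_conn_req_max_q0 = 128, the SYN
 * alert switches on once more than 128/4 = 32 queued requests have timed
 * out, and switches back off once the count drops to 128/32 = 4 or fewer.
 */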
static boolean_t
tcp_drop_q0(tcp_t *tcp)
{
	tcp_t	*eager;
	mblk_t	*mp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
	ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);

	/* Pick the oldest eager from the list of droppable eagers */
	eager = tcp->tcp_eager_prev_drop_q0;

	/* If the list is empty, return B_FALSE */
	if (eager == tcp) {
		return (B_FALSE);
	}

	/* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
	if ((mp = allocb(0, BPRI_HI)) == NULL)
		return (B_FALSE);

	/*
	 * Take this eager out from the list of droppable eagers since we are
	 * going to drop it.
	 */
	MAKE_UNDROPPABLE(eager);

	if (tcp->tcp_connp->conn_debug) {
		(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
		    "tcp_drop_q0: listen half-open queue (max=%d) overflow"
		    " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
		    tcp->tcp_conn_req_cnt_q0,
		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
	}

	TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop);

	/* Put a reference on the conn as we are enqueueing it in the squeue */
	CONN_INC_REF(eager->tcp_connp);

	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
	    tcp_clean_death_wrapper, eager->tcp_connp, NULL,
	    SQ_FILL, SQTAG_TCP_DROP_Q0);

	return (B_TRUE);
}
/*
 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
 */
static mblk_t *
tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
	tcp_t		*ltcp = lconnp->conn_tcp;
	tcp_t		*tcp = connp->conn_tcp;
	mblk_t		*tpi_mp;
	ipha_t		*ipha;
	ip6_t		*ip6h;
	sin6_t		sin6;
	uint_t		ifindex = ira->ira_ruifindex;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (ira->ira_flags & IRAF_IS_IPV4) {
		ipha = (ipha_t *)mp->b_rptr;

		connp->conn_ipversion = IPV4_VERSION;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;

		sin6 = sin6_null;
		sin6.sin6_addr = connp->conn_faddr_v6;
		sin6.sin6_port = connp->conn_fport;
		sin6.sin6_family = AF_INET6;
		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);

		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
			sin6_t	sin6d;

			sin6d = sin6_null;
			sin6d.sin6_addr = connp->conn_laddr_v6;
			sin6d.sin6_port = connp->conn_lport;
			sin6d.sin6_family = AF_INET;
			tpi_mp = mi_tpi_extconn_ind(NULL,
			    (char *)&sin6d, sizeof (sin6_t),
			    (char *)&tcp,
			    (t_scalar_t)sizeof (intptr_t),
			    (char *)&sin6d, sizeof (sin6_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		} else {
			tpi_mp = mi_tpi_conn_ind(NULL,
			    (char *)&sin6, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		}
	} else {
		ip6h = (ip6_t *)mp->b_rptr;

		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_laddr_v6 = ip6h->ip6_dst;
		connp->conn_faddr_v6 = ip6h->ip6_src;
		connp->conn_saddr_v6 = connp->conn_laddr_v6;

		sin6 = sin6_null;
		sin6.sin6_addr = connp->conn_faddr_v6;
		sin6.sin6_port = connp->conn_fport;
		sin6.sin6_family = AF_INET6;
		sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);

		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
			/* Pass up the scope_id of remote addr */
			sin6.sin6_scope_id = ifindex;
		} else {
			sin6.sin6_scope_id = 0;
		}
		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
			sin6_t	sin6d;

			sin6d = sin6_null;
			sin6d.sin6_addr = connp->conn_laddr_v6;
			sin6d.sin6_port = connp->conn_lport;
			sin6d.sin6_family = AF_INET6;
			if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
				sin6d.sin6_scope_id = ifindex;

			tpi_mp = mi_tpi_extconn_ind(NULL,
			    (char *)&sin6d, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (char *)&sin6d, sizeof (sin6_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		} else {
			tpi_mp = mi_tpi_conn_ind(NULL,
			    (char *)&sin6, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		}
	}

	tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	return (tpi_mp);
}
/* Handle a SYN on an AF_INET socket */
static mblk_t *
tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
	tcp_t		*ltcp = lconnp->conn_tcp;
	tcp_t		*tcp = connp->conn_tcp;
	sin_t		sin;
	mblk_t		*tpi_mp = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	ipha_t		*ipha;

	ASSERT(ira->ira_flags & IRAF_IS_IPV4);
	ipha = (ipha_t *)mp->b_rptr;

	connp->conn_ipversion = IPV4_VERSION;
	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
	connp->conn_saddr_v6 = connp->conn_laddr_v6;

	sin = sin_null;
	sin.sin_addr.s_addr = connp->conn_faddr_v4;
	sin.sin_port = connp->conn_fport;
	sin.sin_family = AF_INET;
	if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
		sin_t	sind;

		sind = sin_null;
		sind.sin_addr.s_addr = connp->conn_laddr_v4;
		sind.sin_port = connp->conn_lport;
		sind.sin_family = AF_INET;
		tpi_mp = mi_tpi_extconn_ind(NULL,
		    (char *)&sind, sizeof (sin_t), (char *)&tcp,
		    (t_scalar_t)sizeof (intptr_t), (char *)&sind,
		    sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	} else {
		tpi_mp = mi_tpi_conn_ind(NULL,
		    (char *)&sin, sizeof (sin_t),
		    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
		    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	}

	tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	return (tpi_mp);
}
/*
 * Called via squeue to get on to the eager's perimeter.  It sends a
 * TH_RST if the eager is in the fanout table.  The listener wants the
 * eager to disappear either by means of tcp_eager_blowoff() or
 * tcp_eager_cleanup() being called.  tcp_eager_kill() can also be
 * called (via squeue) if the eager cannot be inserted in the
 * fanout table in tcp_input_listener().
 */
/* ARGSUSED */
void
tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*econnp = (conn_t *)arg;
	tcp_t	*eager = econnp->conn_tcp;
	tcp_t	*listener = eager->tcp_listener;

	/*
	 * We could be called because the listener is closing.  Since
	 * the eager was using the listener's queues, we avoid
	 * using the listener's queues from now on.
	 */
	ASSERT(eager->tcp_detached);
	econnp->conn_rq = NULL;
	econnp->conn_wq = NULL;

	/*
	 * An eager's conn_fanout will be NULL if it's a duplicate
	 * for an existing 4-tuple in the conn fanout table.
	 * We don't want to send an RST out in such a case.
	 */
	if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
		tcp_xmit_ctl("tcp_eager_kill, can't wait",
		    eager, eager->tcp_snxt, 0, TH_RST);
	}

	/* We are here because the listener wants this eager gone */
	if (listener != NULL) {
		mutex_enter(&listener->tcp_eager_lock);
		tcp_eager_unlink(eager);
		if (eager->tcp_tconnind_started) {
			/*
			 * The eager has sent a conn_ind up to the
			 * listener but the listener decides to close
			 * instead.  We need to drop the extra ref
			 * placed on eager in tcp_input_data() before
			 * sending the conn_ind to the listener.
			 */
			CONN_DEC_REF(econnp);
		}
		mutex_exit(&listener->tcp_eager_lock);
		CONN_DEC_REF(listener->tcp_connp);
	}

	if (eager->tcp_state != TCPS_CLOSED)
		tcp_close_detached(eager);
}
/*
 * Reset any eager connection hanging off this listener marked
 * with 'seqnum' and then reclaim its resources.
 */
boolean_t
tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum)
{
	tcp_t	*eager;
	mblk_t	*mp;

	eager = listener;
	mutex_enter(&listener->tcp_eager_lock);
	do {
		eager = eager->tcp_eager_next_q;
		if (eager == NULL) {
			mutex_exit(&listener->tcp_eager_lock);
			return (B_FALSE);
		}
	} while (eager->tcp_conn_req_seqnum != seqnum);

	if (eager->tcp_closemp_used) {
		mutex_exit(&listener->tcp_eager_lock);
		return (B_TRUE);
	}
	eager->tcp_closemp_used = B_TRUE;
	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
	CONN_INC_REF(eager->tcp_connp);
	mutex_exit(&listener->tcp_eager_lock);
	mp = &eager->tcp_closemp;
	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
	    eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
	return (B_TRUE);
}
/*
 * Reset any eager connection hanging off this listener
 * and then reclaim its resources.
 */
void
tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
{
	tcp_t	*eager;
	mblk_t	*mp;
	tcp_stack_t	*tcps = listener->tcp_tcps;

	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));

	if (!q0_only) {
		/* First cleanup q */
		TCP_STAT(tcps, tcp_eager_blowoff_q);
		eager = listener->tcp_eager_next_q;
		while (eager != NULL) {
			if (!eager->tcp_closemp_used) {
				eager->tcp_closemp_used = B_TRUE;
				TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
				CONN_INC_REF(eager->tcp_connp);
				mp = &eager->tcp_closemp;
				SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp,
				    mp, tcp_eager_kill, eager->tcp_connp,
				    NULL, SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
			}
			eager = eager->tcp_eager_next_q;
		}
	}
	/* Then cleanup q0 */
	TCP_STAT(tcps, tcp_eager_blowoff_q0);
	eager = listener->tcp_eager_next_q0;
	while (eager != listener) {
		if (!eager->tcp_closemp_used) {
			eager->tcp_closemp_used = B_TRUE;
			TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
			CONN_INC_REF(eager->tcp_connp);
			mp = &eager->tcp_closemp;
			SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
			    tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
			    SQTAG_TCP_EAGER_CLEANUP_Q0);
		}
		eager = eager->tcp_eager_next_q0;
	}
}
/*
 * If we are an eager connection hanging off a listener that hasn't
 * formally accepted the connection yet, get off its list and blow off
 * any data that we have accumulated.
 */
void
tcp_eager_unlink(tcp_t *tcp)
{
	tcp_t	*listener = tcp->tcp_listener;

	ASSERT(listener != NULL);
	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
	if (tcp->tcp_eager_next_q0 != NULL) {
		ASSERT(tcp->tcp_eager_prev_q0 != NULL);

		/* Remove the eager tcp from q0 */
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
		listener->tcp_conn_req_cnt_q0--;

		tcp->tcp_eager_next_q0 = NULL;
		tcp->tcp_eager_prev_q0 = NULL;

		/*
		 * Take the eager out, if it is in the list of droppable
		 * eagers.
		 */
		MAKE_UNDROPPABLE(tcp);

		if (tcp->tcp_syn_rcvd_timeout != 0) {
			/* we have timed out before */
			ASSERT(listener->tcp_syn_rcvd_timeout > 0);
			listener->tcp_syn_rcvd_timeout--;
		}
	} else {
		tcp_t	**tcpp = &listener->tcp_eager_next_q;
		tcp_t	*prev = NULL;

		for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
			if (tcpp[0] == tcp) {
				if (listener->tcp_eager_last_q == tcp) {
					/*
					 * If we are unlinking the last
					 * element on the list, adjust
					 * tail pointer.  Set tail pointer
					 * to nil when list is empty.
					 */
					ASSERT(tcp->tcp_eager_next_q == NULL);
					if (listener->tcp_eager_last_q ==
					    listener->tcp_eager_next_q) {
						listener->tcp_eager_last_q =
						    NULL;
					} else {
						/*
						 * We won't get here if there
						 * is only one eager in the
						 * list.
						 */
						ASSERT(prev != NULL);
						listener->tcp_eager_last_q =
						    prev;
					}
				}
				tcpp[0] = tcp->tcp_eager_next_q;
				tcp->tcp_eager_next_q = NULL;
				tcp->tcp_eager_last_q = NULL;
				ASSERT(listener->tcp_conn_req_cnt_q > 0);
				listener->tcp_conn_req_cnt_q--;
				break;
			}
			prev = tcpp[0];
		}
	}
	tcp->tcp_listener = NULL;
}
/*
 * The sockfs ACCEPT path:
 * =======================
 *
 * The eager is now established in its own perimeter as soon as SYN is
 * received in tcp_input_listener().  When sockfs receives conn_ind, it
 * completes the accept processing on the acceptor STREAM.  The sending
 * of the conn_ind part is common for both sockfs listener and a TLI/XTI
 * listener, but a TLI/XTI listener completes the accept processing
 * on the listener perimeter.
 *
 * Common control flow for 3-way handshake:
 * ----------------------------------------
 *
 * incoming SYN (listener perimeter)	-> tcp_input_listener()
 *
 * incoming SYN-ACK-ACK (eager perim)	-> tcp_input_data()
 * send T_CONN_IND (listener perim)	-> tcp_send_conn_ind()
 *
 * Sockfs ACCEPT Path:
 * -------------------
 *
 * open acceptor stream (tcp_open allocates tcp_tli_accept()
 * as STREAM entry point)
 *
 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept()
 *
 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager
 * association (we are not behind the eager's squeue but sockfs is protecting
 * us and no one knows about this stream yet).  The STREAMS entry point
 * q->q_info is changed to point at tcp_wput().
 *
 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to
 * the listener (done on the listener's perimeter).
 *
 * tcp_tli_accept() calls tcp_accept_finish() on the eager's perimeter to
 * finish the accept processing.
 *
 * TLI/XTI client ACCEPT path:
 * ---------------------------
 *
 * soaccept() sends T_CONN_RES on the listener STREAM.
 *
 * tcp_tli_accept() -> tcp_accept_swap() completes the processing and sends
 * a M_SETOPS mblk to the eager perimeter to finish accept
 * (tcp_accept_finish()).
 *
 * Locks:
 * ======
 *
 * listener->tcp_eager_lock protects the listener's tcp_eager_next_q0 and
 * tcp_eager_next_q.
 *
 * Referencing:
 * ============
 *
 * 1) We start out in tcp_input_listener by the eager placing a ref on the
 * listener and the listener adding the eager to listener->tcp_eager_next_q0.
 *
 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to the listener.
 * Before doing so we place a ref on the eager.  This ref is finally dropped
 * at the end of tcp_accept_finish() while unwinding from the squeue, i.e.
 * the reference is dropped by the squeue framework.
 *
 * 3) The ref on the listener placed in 1 above is dropped in
 * tcp_accept_finish.
 *
 * The reference must be released by the same entity that added the
 * reference.  In the above scheme, the eager is the entity that adds and
 * releases the references.  Note that tcp_accept_finish executes in the
 * squeue of the eager (albeit after it is attached to the acceptor stream).
 * Though 1. executes in the listener's squeue, the eager is nascent at this
 * point and the reference can be considered to have been added on behalf of
 * the eager.
 *
 * Eager getting a Reset or listener closing:
 * ==========================================
 *
 * Once the listener and eager are linked, the listener never does the
 * unlink.  If the listener needs to close, tcp_eager_cleanup() is called
 * which queues a message on every eager's perimeter.  The eager then does
 * the unlink, clears any pointers to the listener's queue and drops the
 * reference to the listener.  The listener waits in tcp_close outside the
 * squeue until its refcount has dropped to 1.  This ensures that the
 * listener has waited for all eagers to clear their association with the
 * listener.
 *
 * Similarly, if the eager decides to go away, it can unlink itself and
 * close.  When the T_CONN_RES comes down, we check if the eager has closed.
 * Note that the reference to the eager is still valid because of the extra
 * ref we put in tcp_send_conn_ind.
 *
 * The listener can always locate the eager under the protection
 * of the listener->tcp_eager_lock, and then do a refhold
 * on the eager during the accept processing.
 *
 * The acceptor stream accesses the eager in the accept processing
 * based on the ref placed on the eager before sending T_conn_ind.
 * The only entity that can negate this refhold is a listener close,
 * which is mutually exclusive with an active acceptor stream.
 *
 * Eager's reference on the listener
 * =================================
 *
 * If the accept happens (even on a closed eager) the eager drops its
 * reference on the listener at the start of tcp_accept_finish.  If the
 * eager is killed due to an incoming RST before the T_conn_ind is sent up,
 * the reference is dropped in tcp_closei_local.  If the listener closes,
 * the reference is dropped in tcp_eager_kill.  In all cases the reference
 * is dropped while executing in the eager's context (squeue).
 */
/* Process the SYN packet, mp, directed at the listener 'tcp' */

/*
 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
 * tcp_input_data will not see any packets for listeners since the listener
 * has conn_recv set to tcp_input_listener.
 */
static void
tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	tcpha_t		*tcpha;
	uint32_t	seg_seq;
	mblk_t		*tpi_mp;
	uint_t		flags;
	conn_t		*econnp = NULL;
	tcp_t		*eager;
	int		err;
	mblk_t		*mp1;
	uint_t		ip_hdr_len;
	conn_t		*lconnp = (conn_t *)arg;
	tcp_t		*listener = lconnp->conn_tcp;
	tcp_stack_t	*tcps = listener->tcp_tcps;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
	squeue_t	*new_sqp;
	uint_t		ifindex = ira->ira_ruifindex;
	boolean_t	tlc_set = B_FALSE;

	ip_hdr_len = ira->ira_ip_hdr_length;
	tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
	flags = (unsigned int)tcpha->tha_flags & 0xFF;
	DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa,
	    __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener,
	    __dtrace_tcp_tcph_t *, tcpha);

	if (!(flags & TH_SYN)) {
		if ((flags & TH_RST) || (flags & TH_URG)) {
			freemsg(mp);
			return;
		}
		if (flags & TH_ACK) {
			/* Note this executes in listener's squeue */
			tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
			return;
		}

		freemsg(mp);
		return;
	}

	if (listener->tcp_state != TCPS_LISTEN)
		goto error2;

	ASSERT(IPCL_IS_BOUND(lconnp));

	mutex_enter(&listener->tcp_eager_lock);
	/*
	 * The system is under memory pressure, so we need to do our part
	 * to relieve the pressure.  So we only accept new requests if there
	 * is nothing waiting to be accepted or waiting to complete the 3-way
	 * handshake.  This means that busy listeners will not get too many
	 * new requests which they cannot handle in time, while non-busy
	 * listeners are still functioning properly.
	 */
	if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 ||
	    listener->tcp_conn_req_cnt_q0 > 0)) {
		mutex_exit(&listener->tcp_eager_lock);
		TCP_STAT(tcps, tcp_listen_mem_drop);
		goto error2;
	}

	if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
		mutex_exit(&listener->tcp_eager_lock);
		TCP_STAT(tcps, tcp_listendrop);
		TCPS_BUMP_MIB(tcps, tcpListenDrop);
		if (lconnp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_input_listener: listen backlog (max=%d) "
			    "overflow (%d pending) on %s",
			    listener->tcp_conn_req_max,
			    listener->tcp_conn_req_cnt_q,
			    tcp_display(listener, NULL, DISP_PORT_ONLY));
		}
		goto error2;
	}

	if (listener->tcp_conn_req_cnt_q0 >=
	    listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
		/*
		 * Q0 is full.  Drop a pending half-open req from the queue
		 * to make room for the new SYN req.  Also mark the time we
		 * drop a SYN.
		 *
		 * A more aggressive defense against SYN attack would
		 * be to set the "tcp_syn_defense" flag now.
		 */
		TCP_STAT(tcps, tcp_listendropq0);
		listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
		if (!tcp_drop_q0(listener)) {
			mutex_exit(&listener->tcp_eager_lock);
			TCPS_BUMP_MIB(tcps, tcpListenDropQ0);
			if (lconnp->conn_debug) {
				(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
				    "tcp_input_listener: listen half-open "
				    "queue (max=%d) full (%d pending) on %s",
				    tcps->tcps_conn_req_max_q0,
				    listener->tcp_conn_req_cnt_q0,
				    tcp_display(listener, NULL,
				    DISP_PORT_ONLY));
			}
			goto error2;
		}
	}
	/*
	 * Enforce the limit set on the number of connections per listener.
	 * Note that tlc_cnt starts with 1.  So we need to add 1 to tlc_max
	 * for comparison.
	 */
	if (listener->tcp_listen_cnt != NULL) {
		tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
		int64_t now;

		if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
			mutex_exit(&listener->tcp_eager_lock);
			now = ddi_get_lbolt64();
			atomic_add_32(&tlc->tlc_cnt, -1);
			TCP_STAT(tcps, tcp_listen_cnt_drop);
			tlc->tlc_drop++;
			if (now - tlc->tlc_report_time >
			    MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
				zcmn_err(lconnp->conn_zoneid, CE_WARN,
				    "Listener (port %d) connection max (%u) "
				    "reached: %u attempts dropped total\n",
				    ntohs(listener->tcp_connp->conn_lport),
				    tlc->tlc_max, tlc->tlc_drop);
				tlc->tlc_report_time = now;
			}
			goto error2;
		}
		tlc_set = B_TRUE;
	}

	mutex_exit(&listener->tcp_eager_lock);
	/*
	 * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
	 * or based on the ring (for packets from GLD).  Otherwise it is
	 * set based on lbolt, i.e., a somewhat random number.
	 */
	ASSERT(ira->ira_sqp != NULL);
	new_sqp = ira->ira_sqp;

	econnp = (conn_t *)tcp_get_conn(arg2, tcps);
	if (econnp == NULL)
		goto error2;

	ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
	econnp->conn_sqp = new_sqp;
	econnp->conn_initial_sqp = new_sqp;
	econnp->conn_ixa->ixa_sqp = new_sqp;

	econnp->conn_fport = tcpha->tha_lport;
	econnp->conn_lport = tcpha->tha_fport;

	err = conn_inherit_parent(lconnp, econnp);
	if (err != 0)
		goto error3;
	/* We already know the laddr of the new connection is ours */
	econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation;

	ASSERT(OK_32PTR(mp->b_rptr));
	ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
	    IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);

	if (lconnp->conn_family == AF_INET) {
		ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
		tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
	} else {
		tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
	}

	if (tpi_mp == NULL)
		goto error3;

	eager = econnp->conn_tcp;
	eager->tcp_detached = B_TRUE;
	SOCK_CONNID_INIT(eager->tcp_connid);

	/*
	 * Initialize the eager's tcp_t and inherit some parameters from
	 * the listener.
	 */
	tcp_init_values(eager, listener);
	ASSERT((econnp->conn_ixa->ixa_flags &
	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));

	if (!tcps->tcps_dev_flow_ctl)
		econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	/* Prepare for diffing against previous packets */
	eager->tcp_recvifindex = 0;
	eager->tcp_recvhops = 0xffffffffU;

	if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
		if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
		    IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
			econnp->conn_incoming_ifindex = ifindex;
			econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
			econnp->conn_ixa->ixa_scopeid = ifindex;
		}
	}

	if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
	    (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
	    tcps->tcps_rev_src_routes) {
		ipha_t *ipha = (ipha_t *)mp->b_rptr;
		ip_pkt_t *ipp = &econnp->conn_xmit_ipp;

		/* Source routing option copyover (reverse it) */
		err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
		if (err != 0) {
			freemsg(tpi_mp);
			goto error3;
		}
		ip_pkt_source_route_reverse_v4(ipp);
	}

	ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
	ASSERT(!eager->tcp_tconnind_started);
	/*
	 * If the SYN came with a credential, it's a loopback packet or a
	 * labeled packet; attach the credential to the TPI message.
	 */
	if (ira->ira_cred != NULL)
		mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);

	eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;
	ASSERT(eager->tcp_ordrel_mp == NULL);

	/* Inherit the listener's non-STREAMS flag */
	if (IPCL_IS_NONSTR(lconnp)) {
		econnp->conn_flags |= IPCL_NONSTR;
		/* All non-STREAMS tcp_ts are sockets */
		eager->tcp_issocket = B_TRUE;
	} else {
		/*
		 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
		 * at close time, we will always have that to send up.
		 * Otherwise, we need to do special handling in case the
		 * allocation fails at that time.
		 */
		if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
			goto error3;
	}
	/*
	 * Now that the IP addresses and ports are setup in econnp we
	 * can do the IPsec policy work.
	 */
	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		if (lconnp->conn_policy != NULL) {
			/*
			 * Inherit the policy from the listener; use
			 * actions from ira
			 */
			if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
				CONN_DEC_REF(econnp);
				freemsg(mp);
				return;
			}
		}
	}

	/*
	 * tcp_set_destination() may set tcp_rwnd according to the route
	 * metrics.  If it does not, the eager's receive window will be set
	 * to the listener's receive window later in this function.
	 */
	eager->tcp_rwnd = 0;
	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = econnp->conn_ixa;

		ASSERT(ira->ira_tsl != NULL);
		/* Discard any old label */
		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
			ASSERT(ixa->ixa_tsl != NULL);
			label_rele(ixa->ixa_tsl);
			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
			ixa->ixa_tsl = NULL;
		}
		if ((lconnp->conn_mlp_type != mlptSingle ||
		    lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
		    ira->ira_tsl != NULL) {
			/*
			 * If this is an MLP connection or a MAC-Exempt
			 * connection with an unlabeled node, packets are to be
			 * exchanged using the security label of the received
			 * SYN packet instead of the server application's label.
			 * tsol_check_dest called from ip_set_destination
			 * might later update TSF_UNLABELED by replacing
			 * ixa_tsl with a new label.
			 */
			label_hold(ira->ira_tsl);
			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
			DTRACE_PROBE2(mlp_syn_accept, conn_t *,
			    econnp, ts_label_t *, ixa->ixa_tsl)
		} else {
			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
			DTRACE_PROBE2(syn_accept, conn_t *,
			    econnp, ts_label_t *, ixa->ixa_tsl)
		}
		/*
		 * conn_connect() called from tcp_set_destination will verify
		 * the destination is allowed to receive packets at the
		 * security label of the SYN-ACK we are generating.  As part of
		 * that, tsol_check_dest() may create a new effective label for
		 * this connection.
		 * Finally conn_connect() will call conn_update_label.
		 * All that remains for TCP to do is to call
		 * conn_build_hdr_template which is done as part of
		 * tcp_set_destination.
		 */
	}
	/*
	 * Since we will clear tcp_listener before we clear tcp_detached
	 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
	 * so we can tell a TCP_IS_DETACHED_NONEAGER apart.
	 */
	eager->tcp_hard_binding = B_TRUE;

	tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
	    TCP_BIND_HASH(econnp->conn_lport)], eager, 0);

	CL_INET_CONNECT(econnp, B_FALSE, err);
	if (err != 0) {
		tcp_bind_hash_remove(eager);
		goto error3;
	}

	SOCK_CONNID_BUMP(eager->tcp_connid);

	/*
	 * Adapt our mss, ttl, ... based on the remote address.
	 */

	if (tcp_set_destination(eager) != 0) {
		TCPS_BUMP_MIB(tcps, tcpAttemptFails);
		/* Undo the bind_hash_insert */
		tcp_bind_hash_remove(eager);
		goto error3;
	}

	/* Process all TCP options. */
	tcp_process_options(eager, tcpha);
	/* Is the other end ECN capable? */
	if (tcps->tcps_ecn_permitted >= 1 &&
	    (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
		eager->tcp_ecn_ok = B_TRUE;
	}

	/*
	 * The listener's conn_rcvbuf should be the default window size or a
	 * window size changed via SO_RCVBUF option.  First round up the
	 * eager's tcp_rwnd to the nearest MSS.  Then find out the window
	 * scale option value if needed.  Call tcp_rwnd_set() to finish the
	 * setting.
	 *
	 * Note if there is a rpipe metric associated with the remote host,
	 * we should not inherit receive window size from listener.
	 */
	eager->tcp_rwnd = MSS_ROUNDUP(
	    (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
	    eager->tcp_rwnd), eager->tcp_mss);
	if (eager->tcp_snd_ws_ok)
		tcp_set_ws_value(eager);
	/*
	 * Note that this is the only place tcp_rwnd_set() is called for
	 * accepting a connection.  We need to call it here instead of
	 * after the 3-way handshake because we need to tell the other
	 * side our rwnd in the SYN-ACK segment.
	 */
	(void) tcp_rwnd_set(eager, eager->tcp_rwnd);

	ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
	    eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);

	ASSERT(econnp->conn_rcvbuf != 0 &&
	    econnp->conn_rcvbuf == eager->tcp_rwnd);
	/* Put a ref on the listener for the eager. */
	CONN_INC_REF(lconnp);
	mutex_enter(&listener->tcp_eager_lock);
	listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
	eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
	listener->tcp_eager_next_q0 = eager;
	eager->tcp_eager_prev_q0 = listener;

	/* Set tcp_listener before adding it to tcp_conn_fanout */
	eager->tcp_listener = listener;
	eager->tcp_saved_listener = listener;

	/*
	 * Set tcp_listen_cnt so that when the connection is done, the counter
	 * is decremented.
	 */
	eager->tcp_listen_cnt = listener->tcp_listen_cnt;

	/*
	 * Tag this detached tcp vector for later retrieval
	 * by our listener client in tcp_accept().
	 */
	eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum;
	listener->tcp_conn_req_cnt_q0++;
	if (++listener->tcp_conn_req_seqnum == -1) {
		/*
		 * -1 is "special" and defined in TPI as something
		 * that should never be used in T_CONN_IND
		 */
		++listener->tcp_conn_req_seqnum;
	}
	mutex_exit(&listener->tcp_eager_lock);

	if (listener->tcp_syn_defense) {
		/* Don't drop the SYN that comes from a good IP source */
		ipaddr_t *addr_cache;

		addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
		if (addr_cache != NULL && econnp->conn_faddr_v4 ==
		    addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) {
			eager->tcp_dontdrop = B_TRUE;
		}
	}
	/*
	 * We need to insert the eager in its own perimeter but as soon
	 * as we do that, we expose the eager to the classifier and
	 * should not touch any field outside the eager's perimeter.
	 * So do all the work necessary before inserting the eager
	 * in its own perimeter.  Be optimistic that conn_connect()
	 * will succeed but undo everything if it fails.
	 */
	seg_seq = ntohl(tcpha->tha_seq);
	eager->tcp_irs = seg_seq;
	eager->tcp_rack = seg_seq;
	eager->tcp_rnxt = seg_seq + 1;
	eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt);
	TCPS_BUMP_MIB(tcps, tcpPassiveOpens);
	eager->tcp_state = TCPS_SYN_RCVD;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL,
	    int32_t, TCPS_LISTEN);

	mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
	    NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (mp1 == NULL) {
		/*
		 * Increment the ref count as we are going to
		 * enqueue an mp in squeue
		 */
		CONN_INC_REF(econnp);
		goto error;
	}

	/*
	 * We need to start the rto timer.  In normal cases, we start
	 * the timer after sending the packet on the wire (or at
	 * least believing that the packet was sent by waiting for
	 * conn_ip_output() to return).  Since this is the first packet
	 * being sent on the wire for the eager, our initial tcp_rto
	 * is at least tcp_rexmit_interval_min which is a fairly
	 * large value to allow the algorithm to adjust slowly to large
	 * fluctuations of RTT during the first few transmissions.
	 *
	 * Starting the timer first and then sending the packet in this
	 * case shouldn't make much difference since tcp_rexmit_interval_min
	 * is of the order of several 100ms and starting the timer
	 * first and then sending the packet will result in a difference
	 * of a few microseconds.
	 *
	 * Without this optimization, we are forced to hold the fanout
	 * lock across the ipcl_bind_insert() and sending the packet
	 * so that we don't race against an incoming packet (maybe RST)
	 * for this eager.
	 *
	 * It is necessary to acquire an extra reference on the eager
	 * at this point and hold it until after tcp_send_data() to
	 * ensure against an eager close race.
	 */

	CONN_INC_REF(econnp);

	TCP_TIMER_RESTART(eager, eager->tcp_rto);

	/*
	 * Insert the eager in its own perimeter now.  We are ready to deal
	 * with any packets on eager.
	 */
	if (ipcl_conn_insert(econnp) != 0)
		goto error;
	ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp);
	freemsg(mp);

	/*
	 * Send the SYN-ACK.  Use the right squeue so that conn_ixa is
	 * only used by one thread at a time.
	 */
	if (econnp->conn_sqp == lconnp->conn_sqp) {
		DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *,
		    econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr,
		    tcp_t *, eager, __dtrace_tcp_tcph_t *,
		    &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]);
		(void) conn_ip_output(mp1, econnp->conn_ixa);
		CONN_DEC_REF(econnp);
	} else {
		SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack,
		    econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK);
	}
	return;
error:
	freemsg(mp1);
	eager->tcp_closemp_used = B_TRUE;
	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
	mp1 = &eager->tcp_closemp;
	SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill,
	    econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2);

	/*
	 * If a connection already exists, send the mp to that connection so
	 * that it can be appropriately dealt with.
	 */
	ipst = tcps->tcps_netstack->netstack_ip;

	if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) {
		if (!IPCL_IS_CONNECTED(econnp)) {
			/*
			 * Something bad happened.  ipcl_conn_insert()
			 * failed because a connection already existed
			 * in the connected hash but we can't find it
			 * anymore (someone blew it away).  Just
			 * free this message and hopefully remote
			 * will retransmit, at which time the SYN can be
			 * treated as a new connection or dealt with
			 * a TH_RST if a connection already exists.
			 */
			CONN_DEC_REF(econnp);
			freemsg(mp);
		} else {
			SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
			    econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
		}
	} else {
		/* Nobody wants this packet */
		freemsg(mp);
	}
	return;
error3:
	CONN_DEC_REF(econnp);
error2:
	freemsg(mp);
	if (tlc_set)
		atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1);
}
/*
 * In an ideal case of vertical partition in NUMA architecture, it's
 * beneficial to have the listener and all the incoming connections
 * tied to the same squeue.  The other constraint is that incoming
 * connections should be tied to the squeue attached to the interrupted
 * CPU for obvious locality reasons, so this leaves the listener to
 * be tied to the same squeue.  Our only problem is that when the listener
 * is binding, the CPU that will get interrupted by the NIC whose
 * IP address the listener is binding to is not even known.  So
 * the code below allows us to change that binding at the time the
 * CPU is interrupted by virtue of the incoming connection's squeue.
 *
 * This is useful only in the case of a listener bound to a specific IP
 * address.  Other kinds of listeners get bound the very first time and
 * there is no attempt to rebind them.
 */
void
tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira)
{
	conn_t		*connp = (conn_t *)arg;
	squeue_t	*sqp = (squeue_t *)arg2;
	squeue_t	*new_sqp;
	uint32_t	conn_flags;

	/*
	 * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
	 * or based on the ring (for packets from GLD).  Otherwise it is
	 * set based on lbolt, i.e., a somewhat random number.
	 */
	ASSERT(ira->ira_sqp != NULL);
	new_sqp = ira->ira_sqp;

	if (connp->conn_fanout == NULL)
		goto done;

	if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
		mutex_enter(&connp->conn_fanout->connf_lock);
		mutex_enter(&connp->conn_lock);
		/*
		 * No one from read or write side can access us now
		 * except for already queued packets on this squeue.
		 * But since we haven't changed the squeue yet, they
		 * can't execute.  If they are processed after we have
		 * changed the squeue, they are sent back to the
		 * correct squeue down below.
		 * But a listener close can race with processing of
		 * incoming SYN.  If incoming SYN processing changes
		 * the squeue then the listener close which is waiting
		 * to enter the squeue would operate on the wrong
		 * squeue.  Hence we don't change the squeue here unless
		 * the refcount is exactly the minimum refcount.  The
		 * minimum refcount of 4 is counted as - 1 each for
		 * TCP and IP, 1 for being in the classifier hash, and
		 * 1 for the mblk being processed.
		 */

		if (connp->conn_ref != 4 ||
		    connp->conn_tcp->tcp_state != TCPS_LISTEN) {
			mutex_exit(&connp->conn_lock);
			mutex_exit(&connp->conn_fanout->connf_lock);
			goto done;
		}
		if (connp->conn_sqp != new_sqp) {
			while (connp->conn_sqp != new_sqp)
				(void) atomic_cas_ptr(&connp->conn_sqp, sqp,
				    new_sqp);
			/* No special MT issues for outbound ixa_sqp hint */
			connp->conn_ixa->ixa_sqp = new_sqp;
		}

		do {
			conn_flags = connp->conn_flags;
			conn_flags |= IPCL_FULLY_BOUND;
			(void) atomic_cas_32(&connp->conn_flags,
			    connp->conn_flags, conn_flags);
		} while (!(connp->conn_flags & IPCL_FULLY_BOUND));

		mutex_exit(&connp->conn_fanout->connf_lock);
		mutex_exit(&connp->conn_lock);

		/*
		 * Assume we have picked a good squeue for the listener.  Make
		 * subsequent SYNs not try to change the squeue.
		 */
		connp->conn_recv = tcp_input_listener;
	}

done:
	if (connp->conn_sqp != sqp) {
		CONN_INC_REF(connp);
		SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp,
		    ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND);
	} else {
		tcp_input_listener(connp, mp, sqp, ira);
	}
}
/*
 * Send up all messages queued on tcp_rcv_list.
 */
uint_t
tcp_rcv_drain(tcp_t *tcp)
{
	mblk_t	*mp;
	uint_t	ret = 0;
	uint_t	cnt = 0;
	queue_t	*q = tcp->tcp_connp->conn_rq;

	/* Can't drain on an eager connection */
	if (tcp->tcp_listener != NULL)
		return (ret);

	/* Can't be a non-STREAMS connection */
	ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));

	/* No need for the push timer now. */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}

	/*
	 * Handle two cases here: we are currently fused or we were
	 * previously fused and have some urgent data to be delivered
	 * upstream. The latter happens because we either ran out of
	 * memory or were detached and therefore sending the SIGURG was
	 * deferred until this point. In either case we pass control
	 * over to tcp_fuse_rcv_drain() since it may need to complete
	 * some work.
	 */
	if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) {
		if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL :
		    &tcp->tcp_fused_sigurg_mp))
			return (ret);
	}

	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
		cnt += msgdsize(mp);
		putnext(q, mp);
	}
	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;

	if (canputnext(q))
		return (tcp_rwnd_reopen(tcp));

	return (ret);
}
/*
 * Queue data on tcp_rcv_list which is a b_next chain.
 * tcp_rcv_last_head/tail is the last element of this chain.
 * Each element of the chain is a b_cont chain.
 *
 * M_DATA messages are added to the current element.
 * Other messages are added as new (b_next) elements.
 */
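/*
 * For example (illustrative sketch, not from the original source): after
 * three in-order M_DATA segments and one message of another db_type have
 * been enqueued, the list looks like this (b_cont across, b_next down):
 *
 *	tcp_rcv_list -> data1 --b_cont--> data2 --b_cont--> data3
 *	                  |
 *	                b_next
 *	                  |
 *	                  v
 *	                other1		<- tcp_rcv_last_head
 *
 * tcp_rcv_last_tail points at the last mblk of the last element, so the
 * next message of the same db_type is appended with a single b_cont link.
 */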
void
tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr)
{
	ASSERT(seg_len == msgdsize(mp));
	ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL);

	if (is_system_labeled()) {
		ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
		/*
		 * Provide for protocols above TCP such as RPC. NOPID leaves
		 * db_cpid unchanged.
		 * The cred could have already been set.
		 */
		if (cr != NULL)
			mblk_setcred(mp, cr, NOPID);
	}

	if (tcp->tcp_rcv_list == NULL) {
		ASSERT(tcp->tcp_rcv_last_head == NULL);
		tcp->tcp_rcv_list = mp;
		tcp->tcp_rcv_last_head = mp;
	} else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) {
		tcp->tcp_rcv_last_tail->b_cont = mp;
	} else {
		tcp->tcp_rcv_last_head->b_next = mp;
		tcp->tcp_rcv_last_head = mp;
	}

	while (mp->b_cont != NULL)
		mp = mp->b_cont;

	tcp->tcp_rcv_last_tail = mp;
	tcp->tcp_rcv_cnt += seg_len;
	tcp->tcp_rwnd -= seg_len;
}
/* Generate an ACK-only (no data) segment for a TCP endpoint */
static mblk_t *
tcp_ack_mp(tcp_t *tcp)
{
	uint32_t	seq_no;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * There are a few cases to be considered while setting the sequence no.
	 * Essentially, we can come here while processing an unacceptable pkt
	 * in the TCPS_SYN_RCVD state, in which case we set the sequence number
	 * to snxt (per RFC 793), note the swnd wouldn't have been set yet.
	 * If we are here for a zero window probe, stick with suna. In all
	 * other cases, we check if suna + swnd encompasses snxt and set
	 * the sequence number to snxt, if so. If snxt falls outside the
	 * window (the receiver probably shrunk its window), we will go with
	 * suna + swnd, otherwise the sequence no will be unacceptable to the
	 * receiver.
	 */
	if (tcp->tcp_zero_win_probe) {
		seq_no = tcp->tcp_suna;
	} else if (tcp->tcp_state == TCPS_SYN_RCVD) {
		ASSERT(tcp->tcp_swnd == 0);
		seq_no = tcp->tcp_snxt;
	} else {
		seq_no = SEQ_GT(tcp->tcp_snxt,
		    (tcp->tcp_suna + tcp->tcp_swnd)) ?
		    (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt;
	}
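	/*
	 * Worked example for the last case: with suna = 1000, swnd = 500 and
	 * snxt = 1700, snxt lies beyond suna + swnd = 1500, so the ACK is
	 * clamped to seq_no = 1500; had snxt been 1400, seq_no would simply
	 * be snxt (1400).
	 */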
	if (tcp->tcp_valid_bits) {
		/*
		 * For the complex case where we have to send some
		 * controls (FIN or SYN), let tcp_xmit_mp do it.
		 */
		return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE,
		    NULL, B_FALSE));
	} else {
		/* Generate a simple ACK */
		int	data_length;
		uchar_t	*rptr;
		tcpha_t	*tcpha;
		mblk_t	*mp1;
		int32_t	total_hdr_len;
		int32_t	tcp_hdr_len;
		int32_t	num_sack_blk = 0;
		int32_t	sack_opt_len;
		ip_xmit_attr_t *ixa = connp->conn_ixa;

		/*
		 * Allocate space for TCP + IP headers
		 * and link-level header
		 */
		if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
			num_sack_blk = MIN(tcp->tcp_max_sack_blk,
			    tcp->tcp_num_sack_blk);
			sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
			    TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
			total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len;
			tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len;
		} else {
			total_hdr_len = connp->conn_ht_iphc_len;
			tcp_hdr_len = connp->conn_ht_ulp_len;
		}
		mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED);
		if (mp1 == NULL)
			return (NULL);
		/* Update the latest receive window size in TCP header. */
		tcp->tcp_tcpha->tha_win =
		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
		/* copy in prototype TCP + IP header */
		rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
		mp1->b_rptr = rptr;
		mp1->b_wptr = rptr + total_hdr_len;
		bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);

		tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];

		/* Set the TCP sequence number. */
		tcpha->tha_seq = htonl(seq_no);

		/* Set up the TCP flag field. */
		tcpha->tha_flags = (uchar_t)TH_ACK;
		if (tcp->tcp_ecn_echo_on)
			tcpha->tha_flags |= TH_ECE;

		tcp->tcp_rack = tcp->tcp_rnxt;
		tcp->tcp_rack_cnt = 0;

		/* fill in timestamp option if in use */
		if (tcp->tcp_snd_ts_ok) {
			uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;

			U32_TO_BE32(llbolt,
			    (char *)tcpha + TCP_MIN_HEADER_LENGTH + 4);
			U32_TO_BE32(tcp->tcp_ts_recent,
			    (char *)tcpha + TCP_MIN_HEADER_LENGTH + 8);
		}
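		/*
		 * The timestamp option is pre-built into the header template
		 * as NOP, NOP, kind 8, length 10, so the TSval lands at byte
		 * TCP_MIN_HEADER_LENGTH + 4 and the TSecr at
		 * TCP_MIN_HEADER_LENGTH + 8, right after the two NOPs and
		 * the kind/length bytes.
		 */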
		/* Fill in SACK options */
		if (num_sack_blk > 0) {
			uchar_t *wptr = (uchar_t *)tcpha +
			    connp->conn_ht_ulp_len;
			sack_blk_t *tmp;
			int32_t	i;

			wptr[0] = TCPOPT_NOP;
			wptr[1] = TCPOPT_NOP;
			wptr[2] = TCPOPT_SACK;
			wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
			    sizeof (sack_blk_t);
			wptr += TCPOPT_REAL_SACK_LEN;

			tmp = tcp->tcp_sack_list;
			for (i = 0; i < num_sack_blk; i++) {
				U32_TO_BE32(tmp[i].begin, wptr);
				wptr += sizeof (tcp_seq);
				U32_TO_BE32(tmp[i].end, wptr);
				wptr += sizeof (tcp_seq);
			}
			tcpha->tha_offset_and_reserved +=
			    ((num_sack_blk * 2 + 1) << 4);
		}
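		/*
		 * For example, with num_sack_blk == 2 the option block is
		 * NOP, NOP, kind 5, len 18, then two (begin, end) pairs:
		 * 2 + 2 + 2 * 8 = 20 bytes, matching sack_opt_len above.
		 * The data offset grows by num_sack_blk * 2 + 1 = 5
		 * 32-bit words, i.e. the same 20 bytes.
		 */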
		ixa->ixa_pktlen = total_hdr_len;

		if (ixa->ixa_flags & IXAF_IS_IPV4) {
			((ipha_t *)rptr)->ipha_length = htons(total_hdr_len);
		} else {
			ip6_t *ip6 = (ip6_t *)rptr;

			ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
		}

		/*
		 * Prime pump for checksum calculation in IP. Include the
		 * adjustment for a source route if any.
		 */
		data_length = tcp_hdr_len + connp->conn_sum;
		data_length = (data_length >> 16) + (data_length & 0xFFFF);
		tcpha->tha_sum = htons(data_length);
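		/*
		 * Example of the fold above: if tcp_hdr_len + conn_sum is
		 * 0x12345, the stored partial sum is 0x1 + 0x2345 = 0x2346;
		 * IP later adds in the remaining pseudo-header contribution
		 * and finishes the ones-complement sum.
		 */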
		if (tcp->tcp_ip_forward_progress) {
			tcp->tcp_ip_forward_progress = B_FALSE;
			connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
		} else {
			connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
		}
		return (mp1);
	}
}
/*
 * Dummy socket upcalls for if/when the conn_t gets detached from a
 * direct-callback sonode via a user-driven close(). Easy to catch with
 * DTrace FBT, and should be mostly harmless.
 */

/* ARGSUSED */
static sock_upper_handle_t
tcp_dummy_newconn(sock_upper_handle_t x, sock_lower_handle_t y,
    sock_downcalls_t *z, cred_t *cr, pid_t pid, sock_upcalls_t **ignored)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
	return (NULL);
}

/* ARGSUSED */
static void
tcp_dummy_connected(sock_upper_handle_t x, sock_connid_t y, cred_t *cr,
    pid_t pid)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
	/* Normally we'd crhold(cr) and attach it to socket state. */
}

/* ARGSUSED */
static void
tcp_dummy_disconnected(sock_upper_handle_t x, sock_connid_t y, int blah)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
}

/* ARGSUSED */
static void
tcp_dummy_opctl(sock_upper_handle_t x, sock_opctl_action_t y, uintptr_t blah)
{
	/* We really want this one to be a harmless NOP for now. */
}

/* ARGSUSED */
static ssize_t
tcp_dummy_recv(sock_upper_handle_t x, mblk_t *mp, size_t len, int flags,
    int *error, boolean_t *push)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
	/*
	 * Consume the message, set ESHUTDOWN, and return an error.
	 */
	freemsg(mp);
	*error = ESHUTDOWN;
	return (-1);
}

/* ARGSUSED */
static void
tcp_dummy_set_proto_props(sock_upper_handle_t x, struct sock_proto_props *y)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
}

/* ARGSUSED */
static void
tcp_dummy_txq_full(sock_upper_handle_t x, boolean_t y)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
}

/* ARGSUSED */
static void
tcp_dummy_signal_oob(sock_upper_handle_t x, ssize_t len)
{
	/* Otherwise, this would signal socket state about OOB data. */
}

/* ARGSUSED */
static void
tcp_dummy_set_error(sock_upper_handle_t x, int err)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
}

/* ARGSUSED */
static void
tcp_dummy_onearg(sock_upper_handle_t x)
{
	ASSERT(0);	/* Panic in debug, otherwise ignore. */
}

static sock_upcalls_t tcp_dummy_upcalls = {
	tcp_dummy_newconn,
	tcp_dummy_connected,
	tcp_dummy_disconnected,
	tcp_dummy_opctl,
	tcp_dummy_recv,
	tcp_dummy_set_proto_props,
	tcp_dummy_txq_full,
	tcp_dummy_signal_oob,
	tcp_dummy_onearg,
	tcp_dummy_set_error,
	tcp_dummy_onearg
};
/*
 * Handle M_DATA messages from IP. It is called directly from IP via
 * squeue for received IP packets.
 *
 * The first argument is always the connp/tcp to which the mp belongs.
 * There are no exceptions to this rule. The caller has already put
 * a reference on this connp/tcp and once tcp_input_data() returns,
 * the squeue will do the refrele.
 *
 * TH_SYN segments for the listener go directly to tcp_input_listener via
 * squeue. ICMP errors go directly to tcp_icmp_input().
 *
 * sqp: NULL = recursive, sqp != NULL means called from squeue
 */
void
tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	int32_t		bytes_acked;
	int32_t		gap;
	mblk_t		*mp1;
	uint_t		flags;
	uint32_t	new_swnd = 0;
	uchar_t		*iphdr;
	uchar_t		*rptr;
	int32_t		rgap;
	uint32_t	seg_ack;
	int		seg_len;
	uint_t		ip_hdr_len;
	uint32_t	seg_seq;
	tcpha_t		*tcpha;
	int		urp;
	tcp_opt_t	tcpopt;
	ip_pkt_t	ipp;
	boolean_t	ofo_seg = B_FALSE; /* Out of order segment */
	uint32_t	cwnd;
	int		npkt;
	int		mss;
	conn_t		*connp = (conn_t *)arg;
	squeue_t	*sqp = (squeue_t *)arg2;
	tcp_t		*tcp = connp->conn_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	sock_upcalls_t	*sockupcalls;
	/*
	 * RST from fused tcp loopback peer should trigger an unfuse.
	 */
	if (tcp->tcp_fused) {
		TCP_STAT(tcps, tcp_fusion_aborted);
		tcp_unfuse(tcp);
	}

	iphdr = mp->b_rptr;
	rptr = mp->b_rptr;
	ASSERT(OK_32PTR(rptr));

	ip_hdr_len = ira->ira_ip_hdr_length;
	if (connp->conn_recv_ancillary.crb_all != 0) {
		/*
		 * Record packet information in the ip_pkt_t
		 */
		if (ira->ira_flags & IRAF_IS_IPV4) {
			(void) ip_find_hdr_v4((ipha_t *)rptr, &ipp,
			    B_FALSE);
		} else {
			uint8_t nexthdrp;

			/*
			 * IPv6 packets can only be received by applications
			 * that are prepared to receive IPv6 addresses.
			 * The IP fanout must ensure this.
			 */
			ASSERT(connp->conn_family == AF_INET6);

			(void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp,
			    &nexthdrp);
			ASSERT(nexthdrp == IPPROTO_TCP);

			/* Could have caused a pullup? */
			iphdr = mp->b_rptr;
			rptr = mp->b_rptr;
		}
	}
	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT(mp->b_next == NULL);

	tcpha = (tcpha_t *)&rptr[ip_hdr_len];
	seg_seq = ntohl(tcpha->tha_seq);
	seg_ack = ntohl(tcpha->tha_ack);
	ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
	seg_len = (int)(mp->b_wptr - rptr) -
	    (ip_hdr_len + TCP_HDR_LENGTH(tcpha));
	if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) {
		do {
			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
			    (uintptr_t)INT_MAX);
			seg_len += (int)(mp1->b_wptr - mp1->b_rptr);
		} while ((mp1 = mp1->b_cont) != NULL &&
		    mp1->b_datap->db_type == M_DATA);
	}
	DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa,
	    __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp,
	    __dtrace_tcp_tcph_t *, tcpha);

	if (tcp->tcp_state == TCPS_TIME_WAIT) {
		tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack,
		    seg_len, tcpha, ira);
		return;
	}

	/*
	 * This is the correct place to update tcp_last_recv_time. Note
	 * that it is also updated for tcp structure that belongs to
	 * global and listener queues which do not really need updating.
	 * But that should not cause any harm. And it is updated for
	 * all kinds of incoming segments, not only for data segments.
	 */
	tcp->tcp_last_recv_time = LBOLT_FASTPATH;

	flags = (unsigned int)tcpha->tha_flags & 0xFF;

	BUMP_LOCAL(tcp->tcp_ibsegs);
	DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
	if ((flags & TH_URG) && sqp != NULL) {
		/*
		 * TCP can't handle urgent pointers that arrive before
		 * the connection has been accept()ed since it can't
		 * buffer OOB data. Discard segment if this happens.
		 *
		 * We can't just rely on a non-null tcp_listener to indicate
		 * that the accept() has completed since unlinking of the
		 * eager and completion of the accept are not atomic.
		 * tcp_detached, when it is not set (B_FALSE) indicates
		 * that the accept() has completed.
		 *
		 * Nor can it reassemble urgent pointers, so discard
		 * if it's not the next segment expected.
		 *
		 * Otherwise, collapse chain into one mblk (discard if
		 * that fails). This makes sure the headers, retransmitted
		 * data, and new data all are in the same mblk.
		 */
		ASSERT(mp != NULL);
		if (tcp->tcp_detached || !pullupmsg(mp, -1)) {
			freemsg(mp);
			return;
		}
		/* Update pointers into message */
		iphdr = rptr = mp->b_rptr;
		tcpha = (tcpha_t *)&rptr[ip_hdr_len];
		if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) {
			/*
			 * Since we can't handle any data with this urgent
			 * pointer that is out of sequence, we expunge
			 * the data. This allows us to still register
			 * the urgent mark and generate the M_PCSIG,
			 * which we can do.
			 */
			mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha);
			seg_len = 0;
		}
	}

	sockupcalls = connp->conn_upcalls;
	/* A conn_t may have belonged to a now-closed socket. Be careful. */
	if (sockupcalls == NULL)
		sockupcalls = &tcp_dummy_upcalls;
	switch (tcp->tcp_state) {
	case TCPS_SYN_SENT:
		if (connp->conn_final_sqp == NULL &&
		    tcp_outbound_squeue_switch && sqp != NULL) {
			ASSERT(connp->conn_initial_sqp == connp->conn_sqp);
			connp->conn_final_sqp = sqp;
			if (connp->conn_final_sqp != connp->conn_sqp) {
				DTRACE_PROBE1(conn__final__sqp__switch,
				    conn_t *, connp);
				CONN_INC_REF(connp);
				SQUEUE_SWITCH(connp, connp->conn_final_sqp);
				SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
				    tcp_input_data, connp, ira, ip_squeue_flag,
				    SQTAG_CONNECT_FINISH);
				return;
			}
			DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp);
		}
		if (flags & TH_ACK) {
			/*
			 * Note that our stack cannot send data before a
			 * connection is established, therefore the
			 * following check is valid. Otherwise, it has
			 * to be changed.
			 */
			if (SEQ_LEQ(seg_ack, tcp->tcp_iss) ||
			    SEQ_GT(seg_ack, tcp->tcp_snxt)) {
				freemsg(mp);
				if (flags & TH_RST)
					return;
				tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq",
				    tcp, seg_ack, 0, TH_RST);
				return;
			}
			ASSERT(tcp->tcp_suna + 1 == seg_ack);
		}
		if (flags & TH_RST) {
			if (flags & TH_ACK) {
				DTRACE_TCP5(connect__refused, mblk_t *, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void_ip_t *, iphdr, tcp_t *, tcp,
				    tcph_t *, tcpha);
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			freemsg(mp);
			return;
		}
		if (!(flags & TH_SYN)) {
			freemsg(mp);
			return;
		}
		/* Process all TCP options. */
		tcp_process_options(tcp, tcpha);

		/*
		 * The following changes our rwnd to be a multiple of the
		 * MIN(peer MSS, our MSS) for performance reason.
		 */
		(void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf,
		    tcp->tcp_mss));

		/* Is the other end ECN capable? */
		if (tcp->tcp_ecn_ok) {
			if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) {
				tcp->tcp_ecn_ok = B_FALSE;
			}
		}
		/*
		 * Clear ECN flags because it may interfere with later
		 * processing.
		 */
		flags &= ~(TH_ECE|TH_CWR);
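		/*
		 * Per RFC 3168, a peer that agreed to ECN answers our
		 * ECN-setup SYN with a SYN-ACK carrying ECE and not CWR;
		 * any other combination means ECN was not negotiated, which
		 * is why tcp_ecn_ok was turned off above before the flag
		 * bits were cleared.
		 */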
		tcp->tcp_irs = seg_seq;
		tcp->tcp_rack = seg_seq;
		tcp->tcp_rnxt = seg_seq + 1;
		tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
		if (!TCP_IS_DETACHED(tcp)) {
			/* Allocate room for SACK options if needed. */
			connp->conn_wroff = connp->conn_ht_iphc_len;
			if (tcp->tcp_snd_sack_ok)
				connp->conn_wroff += TCPOPT_MAX_SACK_LEN;
			if (!tcp->tcp_loopback)
				connp->conn_wroff += tcps->tcps_wroff_xtra;

			(void) proto_set_tx_wroff(connp->conn_rq, connp,
			    connp->conn_wroff);
		}
		if (flags & TH_ACK) {
			/*
			 * If we can't get the confirmation upstream, pretend
			 * we didn't even see this one.
			 *
			 * XXX: how can we pretend we didn't see it if we
			 * have updated rnxt et. al.
			 *
			 * For loopback we defer sending up the T_CONN_CON
			 * until after some checks below.
			 */
			mp1 = NULL;
			/*
			 * tcp_sendmsg() checks tcp_state without entering
			 * the squeue so tcp_state should be updated before
			 * sending up connection confirmation. Probe the
			 * state change below when we are sure the connection
			 * confirmation has been sent.
			 */
			tcp->tcp_state = TCPS_ESTABLISHED;
			if (!tcp_conn_con(tcp, iphdr, mp,
			    tcp->tcp_loopback ? &mp1 : NULL, ira)) {
				tcp->tcp_state = TCPS_SYN_SENT;
				freemsg(mp);
				return;
			}
			TCPS_CONN_INC(tcps);
			/* SYN was acked - making progress */
			tcp->tcp_ip_forward_progress = B_TRUE;

			/* One for the SYN */
			tcp->tcp_suna = tcp->tcp_iss + 1;
			tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
			/*
			 * If SYN was retransmitted, need to reset all
			 * retransmission info. This is because this
			 * segment will be treated as a dup ACK.
			 */
			if (tcp->tcp_rexmit) {
				tcp->tcp_rexmit = B_FALSE;
				tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
				tcp->tcp_rexmit_max = tcp->tcp_snxt;
				tcp->tcp_snd_burst = tcp->tcp_localnet ?
				    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
				tcp->tcp_ms_we_have_waited = 0;

				/*
				 * Set tcp_cwnd back to 1 MSS, per
				 * recommendation from
				 * draft-floyd-incr-init-win-01.txt,
				 * Increasing TCP's Initial Window.
				 */
				tcp->tcp_cwnd = tcp->tcp_mss;
			}
			tcp->tcp_swl1 = seg_seq;
			tcp->tcp_swl2 = seg_ack;

			new_swnd = ntohs(tcpha->tha_win);
			tcp->tcp_swnd = new_swnd;
			if (new_swnd > tcp->tcp_max_swnd)
				tcp->tcp_max_swnd = new_swnd;

			/*
			 * Always send the three-way handshake ack immediately
			 * in order to make the connection complete as soon as
			 * possible on the accepting host.
			 */
			flags |= TH_ACK_NEEDED;

			/*
			 * Trace connect-established here.
			 */
			DTRACE_TCP5(connect__established, mblk_t *, NULL,
			    ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa,
			    void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha);

			/* Trace change from SYN_SENT -> ESTABLISHED here */
			DTRACE_TCP6(state__change, void, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void, NULL,
			    tcp_t *, tcp, void, NULL,
			    int32_t, TCPS_SYN_SENT);
			/*
			 * Special case for loopback. At this point we have
			 * received SYN-ACK from the remote endpoint. In
			 * order to ensure that both endpoints reach the
			 * fused state prior to any data exchange, the final
			 * ACK needs to be sent before we indicate T_CONN_CON
			 * to the module upstream.
			 */
			if (tcp->tcp_loopback) {
				mblk_t *ack_mp;

				ASSERT(!tcp->tcp_unfusable);
				ASSERT(mp1 != NULL);
				/*
				 * For loopback, we always get a pure SYN-ACK
				 * and only need to send back the final ACK
				 * with no data (this is because the other
				 * tcp is ours and we don't do T/TCP). This
				 * final ACK triggers the passive side to
				 * perform fusion in ESTABLISHED state.
				 */
				if ((ack_mp = tcp_ack_mp(tcp)) != NULL) {
					if (tcp->tcp_ack_tid != 0) {
						(void) TCP_TIMER_CANCEL(tcp,
						    tcp->tcp_ack_tid);
						tcp->tcp_ack_tid = 0;
					}
					tcp_send_data(tcp, ack_mp);
					BUMP_LOCAL(tcp->tcp_obsegs);
					TCPS_BUMP_MIB(tcps, tcpOutAck);

					if (!IPCL_IS_NONSTR(connp)) {
						/* Send up T_CONN_CON */
						if (ira->ira_cred != NULL) {
							mblk_setcred(mp1,
							    ira->ira_cred,
							    ira->ira_cpid);
						}
						putnext(connp->conn_rq, mp1);
					} else {
						(*sockupcalls->su_connected)
						    (connp->conn_upper_handle,
						    tcp->tcp_connid,
						    ira->ira_cred,
						    ira->ira_cpid);
						freemsg(mp1);
					}

					freemsg(mp);
					return;
				}
				/*
				 * Forget fusion; we need to handle more
				 * complex cases below. Send the deferred
				 * T_CONN_CON message upstream and proceed
				 * as usual. Mark this tcp as not capable
				 * of fusion.
				 */
				TCP_STAT(tcps, tcp_fusion_unfusable);
				tcp->tcp_unfusable = B_TRUE;
				if (!IPCL_IS_NONSTR(connp)) {
					if (ira->ira_cred != NULL) {
						mblk_setcred(mp1,
						    ira->ira_cred,
						    ira->ira_cpid);
					}
					putnext(connp->conn_rq, mp1);
				} else {
					(*sockupcalls->su_connected)
					    (connp->conn_upper_handle,
					    tcp->tcp_connid, ira->ira_cred,
					    ira->ira_cpid);
					freemsg(mp1);
				}
			}
			/*
			 * Check to see if there is data to be sent. If
			 * yes, set the transmit flag. Then check to see
			 * if received data processing needs to be done.
			 * If not, go straight to xmit_check. This short
			 * cut is OK as we don't support T/TCP.
			 */
			if (tcp->tcp_unsent)
				flags |= TH_XMIT_NEEDED;

			if (seg_len == 0 && !(flags & TH_URG)) {
				freemsg(mp);
				goto xmit_check;
			}

			flags &= ~TH_SYN;
			seg_seq++;
			break;
		}
		tcp->tcp_state = TCPS_SYN_RCVD;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp,
		    tcph_t *, NULL, int32_t, TCPS_SYN_SENT);
		mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
		    NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
		if (mp1 != NULL) {
			tcp_send_data(tcp, mp1);
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
		}
		freemsg(mp);
		return;
	case TCPS_SYN_RCVD:
		if (flags & TH_ACK) {
			uint32_t pinit_wnd;

			/*
			 * In this state, a SYN|ACK packet is either bogus
			 * because the other side must be ACKing our SYN which
			 * indicates it has seen the ACK for their SYN and
			 * shouldn't retransmit it or we're crossing SYNs
			 * on active open.
			 */
			if ((flags & TH_SYN) && !tcp->tcp_active_open) {
				freemsg(mp);
				tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
				    tcp, seg_ack, 0, TH_RST);
				return;
			}
			/*
			 * NOTE: RFC 793 pg. 72 says this should be
			 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
			 * but that would mean we have an ack that ignored
			 * our SYN.
			 */
			if (SEQ_LEQ(seg_ack, tcp->tcp_suna) ||
			    SEQ_GT(seg_ack, tcp->tcp_snxt)) {
				freemsg(mp);
				tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
				    tcp, seg_ack, 0, TH_RST);
				return;
			}
			/*
			 * No sane TCP stack will send such a small window
			 * without receiving any data. Just drop this invalid
			 * ACK. We also shorten the abort timeout in case
			 * this is an attack.
			 */
			pinit_wnd = ntohs(tcpha->tha_win) << tcp->tcp_snd_ws;
			if (pinit_wnd < tcp->tcp_mss &&
			    pinit_wnd < tcp_init_wnd_chk) {
				freemsg(mp);
				TCP_STAT(tcps, tcp_zwin_ack_syn);
				tcp->tcp_second_ctimer_threshold =
				    tcp_early_abort * SECONDS;
				return;
			}
		}
		break;
	case TCPS_LISTEN:
		/*
		 * Only a TLI listener can come through this path when an
		 * acceptor is going back to be a listener and a packet
		 * for the acceptor hits the classifier. For a socket
		 * listener, this can never happen because a listener
		 * can never accept connection on itself and hence a
		 * socket acceptor can not go back to being a listener.
		 */
		ASSERT(!TCP_IS_SOCKET(tcp));
		/*FALLTHRU*/
	case TCPS_CLOSED:
	case TCPS_BOUND: {
		conn_t	*new_connp;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		/*
		 * Don't accept any input on a closed tcp as this TCP logically
		 * does not exist on the system. Don't proceed further with
		 * this TCP. For instance, this packet could trigger another
		 * close of this tcp which would be disastrous for tcp_refcnt.
		 * tcp_close_detached / tcp_clean_death / tcp_closei_local must
		 * be called at most once on a TCP. In this case we need to
		 * refeed the packet into the classifier and figure out where
		 * the packet should go.
		 */
		new_connp = ipcl_classify(mp, ira, ipst);
		if (new_connp != NULL) {
			/* Drops ref on new_connp */
			tcp_reinput(new_connp, mp, ira, ipst);
			return;
		}
		/* We failed to classify. For now just drop the packet */
		freemsg(mp);
		return;
	}
	case TCPS_IDLE:
		/*
		 * Handle the case where the tcp_clean_death() has happened
		 * on a connection (application hasn't closed yet) but a packet
		 * was already queued on squeue before tcp_clean_death()
		 * was processed. Calling tcp_clean_death() twice on same
		 * connection can result in weird behaviour.
		 */
		freemsg(mp);
		return;
	default:
		break;
	}

	/*
	 * Already on the correct queue/perimeter.
	 * If this is a detached connection and not an eager
	 * connection hanging off a listener then new data
	 * (past the FIN) will cause a reset.
	 * We do a special check here where it
	 * is out of the main line, rather than check
	 * if we are detached every time we see new
	 * data down below.
	 */
	if (TCP_IS_DETACHED_NONEAGER(tcp) &&
	    (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) {
		TCPS_BUMP_MIB(tcps, tcpInClosed);
		DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);

		freemsg(mp);
		tcp_xmit_ctl("new data when detached", tcp,
		    tcp->tcp_snxt, 0, TH_RST);
		(void) tcp_clean_death(tcp, EPROTO);
		return;
	}
->b_rptr
= (uchar_t
*)tcpha
+ TCP_HDR_LENGTH(tcpha
);
2912 urp
= ntohs(tcpha
->tha_urp
) - TCP_OLD_URP_INTERPRETATION
;
2913 new_swnd
= ntohs(tcpha
->tha_win
) <<
2914 ((tcpha
->tha_flags
& TH_SYN
) ? 0 : tcp
->tcp_snd_ws
);
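	/*
	 * The raw 16-bit window is scaled by the shift negotiated at
	 * connection setup, but never on a segment carrying SYN (RFC 1323
	 * excludes SYNs from scaling). E.g. a raw value of 1024 with
	 * tcp_snd_ws == 3 means the peer is advertising an 8192-byte window.
	 */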
	if (tcp->tcp_snd_ts_ok) {
		if (!tcp_paws_check(tcp, tcpha, &tcpopt)) {
			/*
			 * This segment is not acceptable.
			 * Drop it and send back an ACK.
			 */
			freemsg(mp);
			flags |= TH_ACK_NEEDED;
			goto ack_check;
		}
	} else if (tcp->tcp_snd_sack_ok) {
		tcpopt.tcp = tcp;
		/*
		 * SACK info is already updated in tcp_parse_options. Ignore
		 * all other TCP options...
		 */
		(void) tcp_parse_options(tcpha, &tcpopt);
	}
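	/*
	 * tcp_paws_check() implements PAWS (RFC 1323): broadly, a segment
	 * whose timestamp is older than the cached tcp_ts_recent is presumed
	 * to be an old duplicate from a previous sequence-number wrap and is
	 * dropped here, with an ACK sent to keep the peer synchronized.
	 */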
try_again:;
	mss = tcp->tcp_mss;
	gap = seg_seq - tcp->tcp_rnxt;
	rgap = tcp->tcp_rwnd - (gap + seg_len);
	/*
	 * gap is the amount of sequence space between what we expect to see
	 * and what we got for seg_seq. A positive value for gap means
	 * something got lost. A negative value means we got some old stuff.
	 */
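	/*
	 * Example: with rnxt = 5000 and rwnd = 8192, a segment with
	 * seg_seq = 4000 and seg_len = 1460 yields gap = -1000 (the first
	 * 1000 bytes are a retransmission of old data) and
	 * rgap = 8192 - 460 = 7732 (the 460 new bytes fit well inside the
	 * receive window).
	 */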
	if (gap < 0) {
		/* Old stuff present. Is the SYN in there? */
		if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) &&
		    (seg_len != 0)) {
			flags &= ~TH_SYN;
			seg_seq++;
			urp--;
			/* Recompute the gaps after noting the SYN. */
			goto try_again;
		}
		TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
		TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes,
		    (seg_len > -gap ? -gap : seg_len));
		/* Remove the old stuff from seg_len. */
		seg_len += gap;

		/*
		 * Anything left?
		 * Make sure to check for unack'd FIN when rest of data
		 * has been previously ack'd.
		 */
		if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
			/*
			 * Resets are only valid if they lie within our offered
			 * window. If the RST bit is set, we just ignore this
			 * packet if it is out of window.
			 */
			if (flags & TH_RST) {
				freemsg(mp);
				return;
			}

			/*
			 * The arrival of dup data packets indicates that we
			 * may have postponed an ack for too long, or the other
			 * side's RTT estimate is out of shape. Start acking
			 * more often.
			 */
			if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) &&
			    tcp->tcp_rack_cnt >= 1 &&
			    tcp->tcp_rack_abs_max > 2) {
				tcp->tcp_rack_abs_max--;
			}
			tcp->tcp_rack_cur_max = 1;
			/*
			 * This segment is "unacceptable". None of its
			 * sequence space lies within our advertised window.
			 *
			 * Adjust seg_len to the original value for tracing.
			 */
			seg_len -= gap;
			if (connp->conn_debug) {
				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
				    "tcp_rput: unacceptable, gap %d, rgap %d, "
				    "flags 0x%x, seg_seq %u, seg_ack %u, "
				    "seg_len %d, rnxt %u, snxt %u, %s",
				    gap, rgap, flags, seg_seq, seg_ack,
				    seg_len, tcp->tcp_rnxt, tcp->tcp_snxt,
				    tcp_display(tcp, NULL,
				    DISP_ADDR_AND_PORT));
			}

			/*
			 * Arrange to send an ACK in response to the
			 * unacceptable segment per RFC 793 page 69. There
			 * is only one small difference between ours and the
			 * acceptability test in the RFC - we accept ACK-only
			 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK
			 * will be generated.
			 *
			 * Note that we have to ACK an ACK-only packet at least
			 * for stacks that send 0-length keep-alives with
			 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122,
			 * section 4.2.3.6. As long as we don't ever generate
			 * an unacceptable packet in response to an incoming
			 * packet that is unacceptable, it should not cause
			 * "ACK wars".
			 */
			flags |= TH_ACK_NEEDED;

			/*
			 * Continue processing this segment in order to use the
			 * ACK information it contains, but skip all other
			 * sequence-number processing. Processing the ACK
			 * information is necessary in order to
			 * re-synchronize connections that may have lost
			 * synchronization.
			 *
			 * We clear seg_len and flag fields related to
			 * sequence number processing as they are not
			 * to be trusted for an unacceptable segment.
			 */
			seg_len = 0;
			flags &= ~(TH_SYN | TH_FIN | TH_URG);
			goto process_ack;
		}
		/* Fix seg_seq, and chew the gap off the front. */
		seg_seq = tcp->tcp_rnxt;
		urp += gap;
		do {
			mblk_t	*mp2;

			ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
			    (uintptr_t)UINT_MAX);
			gap += (uint_t)(mp->b_wptr - mp->b_rptr);
			if (gap > 0) {
				mp->b_rptr = mp->b_wptr - gap;
				break;
			}
			mp2 = mp;
			mp = mp->b_cont;
			freeb(mp2);
		} while (gap < 0);
		/*
		 * If the urgent data has already been acknowledged, we
		 * should ignore TH_URG below
		 */
		if (urp < 0)
			flags &= ~TH_URG;
	}
3065 * value is the amount out of window.
3070 if (tcp
->tcp_rwnd
== 0) {
3071 TCPS_BUMP_MIB(tcps
, tcpInWinProbe
);
3073 TCPS_BUMP_MIB(tcps
, tcpInDataPastWinSegs
);
3074 TCPS_UPDATE_MIB(tcps
, tcpInDataPastWinBytes
, -rgap
);
3078 * seg_len does not include the FIN, so if more than
3079 * just the FIN is out of window, we act like we don't
3080 * see it. (If just the FIN is out of window, rgap
3081 * will be zero and we will go ahead and acknowledge
3086 /* Fix seg_len and make sure there is something left. */
3090 * Resets are only valid if they lie within our offered
3091 * window. If the RST bit is set, we just ignore this
3094 if (flags
& TH_RST
) {
3099 /* Per RFC 793, we need to send back an ACK. */
3100 flags
|= TH_ACK_NEEDED
;
			/*
			 * Send SIGURG as soon as possible i.e. even
			 * if the TH_URG was delivered in a window probe
			 * packet (which will be unacceptable).
			 *
			 * We generate a signal if none has been generated
			 * for this connection or if this is a new urgent
			 * byte. Also send a zero-length "unmarked" message
			 * to inform SIOCATMARK that this is not the mark.
			 *
			 * tcp_urp_last_valid is cleared when the T_exdata_ind
			 * is sent up. This plus the check for old data
			 * (gap >= 0) handles the wraparound of the sequence
			 * number space without having to always track the
			 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks
			 * this max in its rcv_up variable).
			 *
			 * This prevents duplicate SIGURGS due to a "late"
			 * zero-window probe when the T_EXDATA_IND has already
			 * been sent up.
			 */
			if ((flags & TH_URG) &&
			    (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq,
			    tcp->tcp_urp_last))) {
				if (IPCL_IS_NONSTR(connp)) {
					if (!TCP_IS_DETACHED(tcp)) {
						(*sockupcalls->su_signal_oob)
						    (connp->conn_upper_handle,
						    urp);
					}
				} else {
					mp1 = allocb(0, BPRI_MED);
					if (mp1 == NULL) {
						freemsg(mp);
						return;
					}
					if (!TCP_IS_DETACHED(tcp) &&
					    !putnextctl1(connp->conn_rq,
					    M_PCSIG, SIGURG)) {
						/* Try again on the rexmit. */
						freemsg(mp1);
						freemsg(mp);
						return;
					}
					/*
					 * If the next byte would be the mark
					 * then mark with MARKNEXT else mark
					 * with NOTMARKNEXT.
					 */
					if (gap == 0 && urp == 0)
						mp1->b_flag |= MSGMARKNEXT;
					else
						mp1->b_flag |= MSGNOTMARKNEXT;
					freemsg(tcp->tcp_urp_mark_mp);
					tcp->tcp_urp_mark_mp = mp1;
					flags |= TH_SEND_URP_MARK;
				}
				tcp->tcp_urp_last_valid = B_TRUE;
				tcp->tcp_urp_last = urp + seg_seq;
			}
			/*
			 * If this is a zero window probe, continue to
			 * process the ACK part. But we need to set seg_len
			 * to 0 to avoid data processing. Otherwise just
			 * drop the segment and send back an ACK.
			 */
			if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) {
				flags &= ~(TH_SYN | TH_URG);
				seg_len = 0;
				goto process_ack;
			} else {
				flags &= ~(TH_SYN | TH_FIN | TH_URG);
				goto ack_check;
			}
		} else {
			/* Pitch out of window stuff off the end. */
			rgap = seg_len;
			mp2 = mp;
			do {
				ASSERT((uintptr_t)(mp2->b_wptr -
				    mp2->b_rptr) <= (uintptr_t)INT_MAX);
				rgap -= (int)(mp2->b_wptr - mp2->b_rptr);
				if (rgap < 0) {
					mp2->b_wptr += rgap;
					if ((mp1 = mp2->b_cont) != NULL) {
						mp2->b_cont = NULL;
						freemsg(mp1);
					}
					break;
				}
			} while ((mp2 = mp2->b_cont) != NULL);
		}
	}
	/*
	 * TCP should check ECN info for segments inside the window only.
	 * Therefore the check should be done here.
	 */
	if (tcp->tcp_ecn_ok) {
		if (flags & TH_CWR) {
			tcp->tcp_ecn_echo_on = B_FALSE;
		}
		/*
		 * Note that both ECN_CE and CWR can be set in the
		 * same segment. In this case, we once again turn
		 * on ECN_ECHO.
		 */
		if (connp->conn_ipversion == IPV4_VERSION) {
			uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service;

			if ((tos & IPH_ECN_CE) == IPH_ECN_CE) {
				tcp->tcp_ecn_echo_on = B_TRUE;
			}
		} else {
			uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf;

			if ((vcf & htonl(IPH_ECN_CE << 20)) ==
			    htonl(IPH_ECN_CE << 20)) {
				tcp->tcp_ecn_echo_on = B_TRUE;
			}
		}
	}
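	/*
	 * For IPv4 the ECN bits are the low two bits of the TOS byte; for
	 * IPv6 they sit in bits 20-21 of the version/class/flow word, which
	 * is why IPH_ECN_CE is shifted left by 20 before masking ip6_vcf.
	 */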
	/*
	 * Check whether we can update tcp_ts_recent. This test is
	 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
	 * Extensions for High Performance: An Update", Internet Draft.
	 */
	if (tcp->tcp_snd_ts_ok &&
	    TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
	    SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
		tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64;
	}
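	/*
	 * In other words, the timestamp is cached only from a segment whose
	 * sequence number is at or below the last byte we have acknowledged;
	 * a newer timestamp carried by an out-of-order segment must not make
	 * later PAWS checks reject valid in-window segments.
	 */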
	if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
		/*
		 * FIN in an out of order segment. We record this in
		 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq.
		 * Clear the FIN so that any check on FIN flag will fail.
		 * Remember that FIN also counts in the sequence number
		 * space. So we need to ack out of order FIN only segments.
		 */
		if (flags & TH_FIN) {
			tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID;
			tcp->tcp_ofo_fin_seq = seg_seq + seg_len;
			flags &= ~TH_FIN;
			flags |= TH_ACK_NEEDED;
		}
		if (seg_len > 0) {
			/* Fill in the SACK blk list. */
			if (tcp->tcp_snd_sack_ok) {
				tcp_sack_insert(tcp->tcp_sack_list,
				    seg_seq, seg_seq + seg_len,
				    &(tcp->tcp_num_sack_blk));
			}

			/*
			 * Attempt reassembly and see if we have something
			 * ready to go.
			 */
			mp = tcp_reass(tcp, mp, seg_seq);
			/* Always ack out of order packets */
			flags |= TH_ACK_NEEDED | TH_PUSH;
			if (mp != NULL) {
				ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
				    (uintptr_t)INT_MAX);
				seg_len = mp->b_cont ? msgdsize(mp) :
				    (int)(mp->b_wptr - mp->b_rptr);
				seg_seq = tcp->tcp_rnxt;
				/*
				 * A gap is filled and the seq num and len
				 * of the gap match that of a previously
				 * received FIN, put the FIN flag back in.
				 */
				if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
				    seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
					flags |= TH_FIN;
					tcp->tcp_valid_bits &=
					    ~TCP_OFO_FIN_VALID;
				}
				if (tcp->tcp_reass_tid != 0) {
					(void) TCP_TIMER_CANCEL(tcp,
					    tcp->tcp_reass_tid);
					/*
					 * Restart the timer if there is still
					 * data in the reassembly queue.
					 */
					if (tcp->tcp_reass_head != NULL) {
						tcp->tcp_reass_tid = TCP_TIMER(
						    tcp, tcp_reass_timer,
						    tcps->tcps_reass_timeout);
					} else {
						tcp->tcp_reass_tid = 0;
					}
				}
			} else {
				/*
				 * Keep going even with NULL mp.
				 * There may be a useful ACK or something else
				 * we don't want to miss.
				 *
				 * But TCP should not perform fast retransmit
				 * because of the ack number. TCP uses
				 * seg_len == 0 to determine if it is a pure
				 * ACK. And this is not a pure ACK.
				 */
				seg_len = 0;
				ofo_seg = B_TRUE;

				if (tcps->tcps_reass_timeout != 0 &&
				    tcp->tcp_reass_tid == 0) {
					tcp->tcp_reass_tid = TCP_TIMER(tcp,
					    tcp_reass_timer,
					    tcps->tcps_reass_timeout);
				}
			}
		}
	} else if (seg_len > 0) {
		TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs);
		TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len);
		/*
		 * If an out of order FIN was received before, and the seq
		 * num and len of the new segment match that of the FIN,
		 * put the FIN flag back in.
		 */
		if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
		    seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
			flags |= TH_FIN;
			tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID;
		}
	}
	if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) {
		if (flags & TH_RST) {
			freemsg(mp);
			switch (tcp->tcp_state) {
			case TCPS_SYN_RCVD:
				(void) tcp_clean_death(tcp, ECONNREFUSED);
				break;
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				(void) tcp_clean_death(tcp, ECONNRESET);
				break;
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				(void) tcp_clean_death(tcp, 0);
				break;
			default:
				ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
				(void) tcp_clean_death(tcp, ENXIO);
				break;
			}
			return;
		}
		if (flags & TH_SYN) {
			/*
			 * See RFC 793, Page 71
			 *
			 * The seq number must be in the window as it should
			 * be "fixed" above. If it is outside window, it should
			 * be already rejected. Note that we allow seg_seq to be
			 * rnxt + rwnd because we want to accept 0 window probe.
			 */
			ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) &&
			    SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd));
			freemsg(mp);
			/*
			 * If the ACK flag is not set, just use our snxt as the
			 * seq number of the RST segment.
			 */
			if (!(flags & TH_ACK)) {
				seg_ack = tcp->tcp_snxt;
			}
			tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
			    TH_RST | TH_ACK);
			ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
			(void) tcp_clean_death(tcp, ECONNRESET);
			return;
		}
		/*
		 * urp could be -1 when the urp field in the packet is 0
		 * and TCP_OLD_URP_INTERPRETATION is set. This implies that
		 * the urgent byte was at seg_seq - 1, in which case we
		 * ignore the urgent flag.
		 */
		if (flags & TH_URG && urp >= 0) {
			if (!tcp->tcp_urp_last_valid ||
			    SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) {
				/*
				 * Non-STREAMS sockets handle the urgent data a
				 * little differently from STREAMS based
				 * sockets. There is no need to mark any mblks
				 * with the MSG{NOT,}MARKNEXT flags to keep
				 * SIOCATMARK happy. Instead a su_signal_oob
				 * upcall is made to update the mark. Neither
				 * is a T_EXDATA_IND mblk needed to be
				 * prepended to the urgent data. The urgent
				 * data is delivered using the su_recv upcall,
				 * where we set the MSG_OOB flag to indicate
				 * that it is urg data.
				 *
				 * Neither TH_SEND_URP_MARK nor
				 * TH_MARKNEXT_NEEDED are used by non-STREAMS
				 * sockets.
				 */
				if (IPCL_IS_NONSTR(connp)) {
					if (!TCP_IS_DETACHED(tcp)) {
						(*sockupcalls->su_signal_oob)
						    (connp->conn_upper_handle,
						    urp);
					}
				} else {
					/*
					 * If we haven't generated the signal
					 * yet for this urgent pointer value,
					 * do it now. Also, send up a
					 * zero-length M_DATA indicating
					 * whether or not this is the mark. The
					 * latter is not needed when a
					 * T_EXDATA_IND is sent up. However, if
					 * there are allocation failures this
					 * code relies on the sender
					 * retransmitting and the socket code
					 * for determining the mark should not
					 * block waiting for the peer to
					 * transmit. Thus, for simplicity we
					 * always send up the mark indication.
					 */
					mp1 = allocb(0, BPRI_MED);
					if (mp1 == NULL) {
						freemsg(mp);
						return;
					}
					if (!TCP_IS_DETACHED(tcp) &&
					    !putnextctl1(connp->conn_rq, M_PCSIG,
					    SIGURG)) {
						/* Try again on the rexmit. */
						freemsg(mp1);
						freemsg(mp);
						return;
					}
					/*
					 * Mark with NOTMARKNEXT for now.
					 * The code below will change this to
					 * MARKNEXT if we are at the mark.
					 *
					 * If there are allocation failures
					 * (e.g. in dupmsg below) the next time
					 * tcp_input_data sees the urgent
					 * segment it will send up the
					 * MSGMARKNEXT message.
					 */
					mp1->b_flag |= MSGNOTMARKNEXT;
					freemsg(tcp->tcp_urp_mark_mp);
					tcp->tcp_urp_mark_mp = mp1;
					flags |= TH_SEND_URP_MARK;

					(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
					    "tcp_rput: sent M_PCSIG 2 seq %x urp %x "
					    "last %x, %s",
					    seg_seq, urp, tcp->tcp_urp_last,
					    tcp_display(tcp, NULL, DISP_PORT_ONLY));
				}
				tcp->tcp_urp_last_valid = B_TRUE;
				tcp->tcp_urp_last = urp + seg_seq;
			} else if (tcp->tcp_urp_mark_mp != NULL) {
				/*
				 * An allocation failure prevented the previous
				 * tcp_input_data from sending up the allocated
				 * MSG*MARKNEXT message - send it up this time
				 * around.
				 */
				flags |= TH_SEND_URP_MARK;
			}
			/*
			 * If the urgent byte is in this segment, make sure that
			 * it is all by itself. This makes it much easier to
			 * deal with the possibility of an allocation failure on
			 * the T_exdata_ind. Note that seg_len is the number of
			 * bytes in the segment, and urp is the offset into the
			 * segment of the urgent byte. urp < seg_len means that
			 * the urgent byte is in this segment.
			 */
			if (urp < seg_len) {
				if (seg_len != 1) {
					uint32_t  tmp_rnxt;
					/*
					 * Break it up and feed it back in.
					 * Re-attach the IP header.
					 */
					mp->b_rptr = iphdr;
					if (urp > 0) {
						/*
						 * There is stuff before the urgent
						 * byte.
						 */
						mp1 = dupmsg(mp);
						if (mp1 == NULL) {
							/*
							 * Trim from urgent byte on.
							 * The rest will come back.
							 */
							(void) adjmsg(mp,
							    urp - seg_len);
							tcp_input_data(connp,
							    mp, NULL, ira);
							return;
						}
						(void) adjmsg(mp1, urp - seg_len);
						/* Feed this piece back in. */
						tmp_rnxt = tcp->tcp_rnxt;
						tcp_input_data(connp, mp1, NULL, ira);
						/*
						 * If the data passed back in was not
						 * processed (ie: bad ACK) sending
						 * the remainder back in will cause a
						 * loop. In this case, drop the
						 * packet and let the sender try
						 * sending a good packet.
						 */
						if (tmp_rnxt == tcp->tcp_rnxt) {
							freemsg(mp);
							return;
						}
					}
					if (urp != seg_len - 1) {
						uint32_t  tmp_rnxt;
						/*
						 * There is stuff after the urgent
						 * byte.
						 */
						mp1 = dupmsg(mp);
						if (mp1 == NULL) {
							/*
							 * Trim everything beyond the
							 * urgent byte. The rest will
							 * come back.
							 */
							(void) adjmsg(mp,
							    urp + 1 - seg_len);
							tcp_input_data(connp,
							    mp, NULL, ira);
							return;
						}
						(void) adjmsg(mp1, urp + 1 - seg_len);
						tmp_rnxt = tcp->tcp_rnxt;
						tcp_input_data(connp, mp1, NULL, ira);
						/*
						 * If the data passed back in was not
						 * processed (ie: bad ACK) sending
						 * the remainder back in will cause a
						 * loop. In this case, drop the
						 * packet and let the sender try
						 * sending a good packet.
						 */
						if (tmp_rnxt == tcp->tcp_rnxt) {
							freemsg(mp);
							return;
						}
					}
					tcp_input_data(connp, mp, NULL, ira);
					return;
				}
				/*
				 * This segment contains only the urgent byte. We
				 * have to allocate the T_exdata_ind, if we can.
				 */
				if (IPCL_IS_NONSTR(connp)) {
					int error;

					(*sockupcalls->su_recv)
					    (connp->conn_upper_handle, mp, seg_len,
					    MSG_OOB, &error, NULL);
					/*
					 * We should never be in middle of a
					 * fallback, the squeue guarantees that.
					 */
					ASSERT(error != EOPNOTSUPP);
					mp = NULL;
					goto ack_check;
				} else if (!tcp->tcp_urp_mp) {
					struct T_exdata_ind *tei;

					mp1 = allocb(sizeof (struct T_exdata_ind),
					    BPRI_MED);
					if (mp1 == NULL) {
						/*
						 * Sigh... It'll be back.
						 * Generate any MSG*MARK message now.
						 */
						freemsg(mp);
						seg_len = 0;
						if (flags & TH_SEND_URP_MARK) {

							ASSERT(tcp->tcp_urp_mark_mp);
							tcp->tcp_urp_mark_mp->b_flag &=
							    ~MSGNOTMARKNEXT;
							tcp->tcp_urp_mark_mp->b_flag |=
							    MSGMARKNEXT;
						}
						goto ack_check;
					}
					mp1->b_datap->db_type = M_PROTO;
					tei = (struct T_exdata_ind *)mp1->b_rptr;
					tei->PRIM_type = T_EXDATA_IND;
					tei->MORE_flag = 0;
					mp1->b_wptr = (uchar_t *)&tei[1];
					tcp->tcp_urp_mp = mp1;

					(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
					    "tcp_rput: allocated exdata_ind %s",
					    tcp_display(tcp, NULL,
					    DISP_PORT_ONLY));

					/*
					 * There is no need to send a separate MSG*MARK
					 * message since the T_EXDATA_IND will be sent
					 * now.
					 */
					flags &= ~TH_SEND_URP_MARK;
					freemsg(tcp->tcp_urp_mark_mp);
					tcp->tcp_urp_mark_mp = NULL;
				}
				/*
				 * Now we are all set. On the next putnext upstream,
				 * tcp_urp_mp will be non-NULL and will get prepended
				 * to what has to be this piece containing the urgent
				 * byte. If for any reason we abort this segment below,
				 * if it comes back, we will have this ready, or it
				 * will get blown off in close.
				 */
			} else if (urp == seg_len) {
				/*
				 * The urgent byte is the next byte after this
				 * sequence number. If this endpoint is
				 * non-STREAMS, then there is nothing to do here
				 * since the socket has already been notified
				 * about the urg pointer by the su_signal_oob
				 * call above.
				 *
				 * In case of STREAMS, some more work might be
				 * needed. If there is data it is marked with
				 * MSGMARKNEXT and any tcp_urp_mark_mp is
				 * discarded since it is not needed. Otherwise,
				 * if the code above just allocated a
				 * zero-length tcp_urp_mark_mp message, that
				 * message is tagged with MSGMARKNEXT. Sending
				 * up these MSGMARKNEXT messages makes
				 * SIOCATMARK work correctly even though the
				 * T_EXDATA_IND will not be sent up until the
				 * urgent byte arrives.
				 */
				if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
					if (seg_len != 0) {
						flags |= TH_MARKNEXT_NEEDED;
						freemsg(tcp->tcp_urp_mark_mp);
						tcp->tcp_urp_mark_mp = NULL;
						flags &= ~TH_SEND_URP_MARK;
					} else if (tcp->tcp_urp_mark_mp !=
					    NULL) {
						flags |= TH_SEND_URP_MARK;
						tcp->tcp_urp_mark_mp->b_flag &=
						    ~MSGNOTMARKNEXT;
						tcp->tcp_urp_mark_mp->b_flag |=
						    MSGMARKNEXT;
					}
				}

				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
				    "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
				    seg_len, flags,
				    tcp_display(tcp, NULL, DISP_PORT_ONLY));
			} else {
				/* Data left until we hit mark */
				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
				    "tcp_rput: URP %d bytes left, %s",
				    urp - seg_len, tcp_display(tcp, NULL,
				    DISP_PORT_ONLY));
			}
		}
	}

process_ack:
	if (!(flags & TH_ACK)) {
		freemsg(mp);
		goto xmit_check;
	}

	bytes_acked = (int)(seg_ack - tcp->tcp_suna);

	if (bytes_acked > 0)
		tcp->tcp_ip_forward_progress = B_TRUE;
	if (tcp->tcp_state == TCPS_SYN_RCVD) {
		/*
		 * tcp_sendmsg() checks tcp_state without entering
		 * the squeue so tcp_state should be updated before
		 * sending up a connection confirmation or a new
		 * connection indication.
		 */
		tcp->tcp_state = TCPS_ESTABLISHED;
		/*
		 * We are seeing the final ack in the three way
		 * hand shake of an active open'ed connection
		 * so we must send up a T_CONN_CON
		 */
		if (tcp->tcp_active_open) {
			if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) {
				freemsg(mp);
				tcp->tcp_state = TCPS_SYN_RCVD;
				return;
			}
			/*
			 * Don't fuse the loopback endpoints for
			 * simultaneous active opens.
			 */
			if (tcp->tcp_loopback) {
				TCP_STAT(tcps, tcp_fusion_unfusable);
				tcp->tcp_unfusable = B_TRUE;
			}
			/*
			 * For simultaneous active open, trace receipt of final
			 * ACK as tcp:::connect-established.
			 */
			DTRACE_TCP5(connect__established, mblk_t *, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
			    iphdr, tcp_t *, tcp, tcph_t *, tcpha);
		} else if (IPCL_IS_NONSTR(connp)) {
			/*
			 * 3-way handshake has completed, so notify socket
			 * of the new connection.
			 *
			 * We are here means eager is fine but it can
			 * get a TH_RST at any point between now and till
			 * accept completes and disappear. We need to
			 * ensure that reference to eager is valid after
			 * we get out of eager's perimeter. So we do
			 * an extra refhold.
			 */
			CONN_INC_REF(connp);

			if (!tcp_newconn_notify(tcp, ira)) {
				/*
				 * The state-change probe for SYN_RCVD ->
				 * ESTABLISHED has not fired yet. We reset
				 * the state to SYN_RCVD so that future
				 * state-change probes report correct state
				 * transitions.
				 */
				tcp->tcp_state = TCPS_SYN_RCVD;
				freemsg(mp);
				/* notification did not go up, so drop ref */
				CONN_DEC_REF(connp);
				/* ... and close the eager */
				ASSERT(TCP_IS_DETACHED(tcp));
				(void) tcp_close_detached(tcp);
				return;
			}
			/*
			 * tcp_newconn_notify() changes conn_upcalls and
			 * connp->conn_upper_handle. Fix things now, in case
			 * there's data attached to this ack.
			 */
			if (connp->conn_upcalls != NULL)
				sockupcalls = connp->conn_upcalls;
			/*
			 * For passive open, trace receipt of final ACK as
			 * tcp:::accept-established.
			 */
			DTRACE_TCP5(accept__established, mblk_t *, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
			    iphdr, tcp_t *, tcp, tcph_t *, tcpha);
		} else {
			/*
			 * 3-way handshake complete - this is a STREAMS based
			 * socket, so pass up the T_CONN_IND.
			 */
			tcp_t	*listener = tcp->tcp_listener;
			mblk_t	*mp = tcp->tcp_conn.tcp_eager_conn_ind;

			tcp->tcp_tconnind_started = B_TRUE;
			tcp->tcp_conn.tcp_eager_conn_ind = NULL;
			ASSERT(mp != NULL);
			/*
			 * We are here means eager is fine but it can
			 * get a TH_RST at any point between now and till
			 * accept completes and disappear. We need to
			 * ensure that reference to eager is valid after
			 * we get out of eager's perimeter. So we do
			 * an extra refhold.
			 */
			CONN_INC_REF(connp);

			/*
			 * The listener also exists because of the refhold
			 * done in tcp_input_listener. It's possible that it
			 * might have closed. We will check that once we
			 * get inside the listener's context.
			 */
			CONN_INC_REF(listener->tcp_connp);
			if (listener->tcp_connp->conn_sqp ==
			    connp->conn_sqp) {
				/*
				 * We optimize by not calling an SQUEUE_ENTER
				 * on the listener since we know that the
				 * listener and eager squeues are the same.
				 * We are able to make this check safely only
				 * because neither the eager nor the listener
				 * can change its squeue. Only an active connect
				 * can change its squeue
				 */
				tcp_send_conn_ind(listener->tcp_connp, mp,
				    listener->tcp_connp->conn_sqp);
				CONN_DEC_REF(listener->tcp_connp);
			} else if (!tcp->tcp_loopback) {
				SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
				    mp, tcp_send_conn_ind,
				    listener->tcp_connp, NULL, SQ_FILL,
				    SQTAG_TCP_CONN_IND);
			} else {
				SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
				    mp, tcp_send_conn_ind,
				    listener->tcp_connp, NULL, SQ_NODRAIN,
				    SQTAG_TCP_CONN_IND);
			}
			/*
			 * For passive open, trace receipt of final ACK as
			 * tcp:::accept-established.
			 */
			DTRACE_TCP5(accept__established, mblk_t *, NULL,
			    ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
			    iphdr, tcp_t *, tcp, tcph_t *, tcpha);
		}
		TCPS_CONN_INC(tcps);
		tcp->tcp_suna = tcp->tcp_iss + 1;	/* One for the SYN */

		/* SYN was acked - making progress */
		tcp->tcp_ip_forward_progress = B_TRUE;

		/*
		 * If SYN was retransmitted, need to reset all
		 * retransmission info as this segment will be
		 * treated as a dup ACK.
		 */
		if (tcp->tcp_rexmit) {
			tcp->tcp_rexmit = B_FALSE;
			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
			tcp->tcp_rexmit_max = tcp->tcp_snxt;
			tcp->tcp_snd_burst = tcp->tcp_localnet ?
			    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
			tcp->tcp_ms_we_have_waited = 0;
			tcp->tcp_cwnd = mss;
		}
		/*
		 * We set the send window to zero here.
		 * This is needed if there is data to be
		 * processed already on the queue.
		 * Later (at swnd_update label), when the
		 * "new_swnd > tcp_swnd" condition is satisfied,
		 * the XMIT_NEEDED flag is set in the current
		 * (SYN_RCVD) state. This ensures tcp_wput_data() is
		 * called if there is already data on queue in
		 * this state.
		 */
		tcp->tcp_swnd = 0;

		if (new_swnd > tcp->tcp_max_swnd)
			tcp->tcp_max_swnd = new_swnd;
		tcp->tcp_swl1 = seg_seq;
		tcp->tcp_swl2 = seg_ack;
		tcp->tcp_valid_bits &= ~TCP_ISS_VALID;

		/* Trace change from SYN_RCVD -> ESTABLISHED here */
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, TCPS_SYN_RCVD);

		/* Fuse when both sides are in ESTABLISHED state */
		if (tcp->tcp_loopback && do_tcp_fusion)
			tcp_fuse(tcp, iphdr, tcpha);
	}

	/* This code follows 4.4BSD-Lite2 mostly. */
	if (bytes_acked < 0)
		goto est;
	/*
	 * If TCP is ECN capable and the congestion experience bit is
	 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be
	 * done once per window (or more loosely, per RTT).
	 */
	if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
		tcp->tcp_cwr = B_FALSE;
	if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
		if (!tcp->tcp_cwr) {
			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
			tcp->tcp_cwnd = npkt * mss;
			/*
			 * If the cwnd is 0, use the timer to clock out
			 * new segments. This is required by the ECN spec.
			 */
			if (tcp->tcp_cwnd == 0)
				TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
			tcp->tcp_cwr = B_TRUE;
			/*
			 * This makes sure that when the ACK comes
			 * back, we will increase tcp_cwnd by 1 MSS.
			 */
			tcp->tcp_cwnd_cnt = 0;
		}
		/*
		 * This marks the end of the current window of in
		 * flight data. That is why we don't use
		 * tcp_suna + tcp_swnd. Only data in flight can
		 * provide ECN info.
		 */
		tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
		tcp->tcp_ecn_cwr_sent = B_FALSE;
	}
	mp1 = tcp->tcp_xmit_head;
	if (bytes_acked == 0) {
		if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
			int dupack_cnt;

			TCPS_BUMP_MIB(tcps, tcpInDupAck);
			/*
			 * Fast retransmit. When we have seen exactly three
			 * identical ACKs while we have unacked data
			 * outstanding we take it as a hint that our peer
			 * dropped something.
			 *
			 * If TCP is retransmitting, don't do fast retransmit.
			 */
			if (mp1 != NULL && tcp->tcp_suna != tcp->tcp_snxt &&
			    !tcp->tcp_rexmit) {
				/* Do Limited Transmit */
				if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
				    tcps->tcps_dupack_fast_retransmit) {
3941 * increase tcp_cwnd so that new
3942 * data can be sent if it is allowed
3943 * by the receive window (tcp_rwnd).
3944 * tcp_wput_data() will take care of
3947 * If the connection is SACK capable,
3948 * only do limited xmit when there
3951 * Note how tcp_cwnd is incremented.
3952 * The first dup ACK will increase
3953 * it by 1 MSS. The second dup ACK
3954 * will increase it by 2 MSS. This
3955 * means that only 1 new segment will
3956 * be sent for each dup ACK.
3958 if (tcp
->tcp_unsent
> 0 &&
3959 (!tcp
->tcp_snd_sack_ok
||
3960 (tcp
->tcp_snd_sack_ok
&&
3961 tcp
->tcp_notsack_list
!= NULL
))) {
3962 tcp
->tcp_cwnd
+= mss
<<
3963 (tcp
->tcp_dupack_cnt
- 1);
3964 flags
|= TH_LIMIT_XMIT
;
				} else if (dupack_cnt ==
				    tcps->tcps_dupack_fast_retransmit) {

					/*
					 * If we have reduced tcp_ssthresh
					 * because of ECN, do not reduce it again
					 * unless it is already one window of data
					 * away. After one window of data, tcp_cwr
					 * should then be cleared. Note that
					 * for non ECN capable connection, tcp_cwr
					 * should always be false.
					 *
					 * Adjust cwnd since the duplicate
					 * ack indicates that a packet was
					 * dropped (due to congestion.)
					 */
					if (!tcp->tcp_cwr) {
						npkt = ((tcp->tcp_snxt -
						    tcp->tcp_suna) >> 1) / mss;
						tcp->tcp_cwnd_ssthresh =
						    MAX(npkt, 2) * mss;
						tcp->tcp_cwnd = (npkt +
						    tcp->tcp_dupack_cnt) * mss;
					}
					if (tcp->tcp_ecn_ok) {
						tcp->tcp_cwr = B_TRUE;
						tcp->tcp_cwr_snd_max =
						    tcp->tcp_snxt;
						tcp->tcp_ecn_cwr_sent = B_FALSE;
					}
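					/*
					 * Example: with 16 * mss bytes in
					 * flight (snxt - suna), npkt is 8,
					 * so ssthresh drops to 8 * mss and
					 * cwnd restarts at (8 + 3) * mss
					 * with the usual dup-ACK threshold
					 * of 3, crediting the segments that
					 * have already left the network.
					 */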
					/*
					 * We do Hoe's algorithm. Refer to her
					 * paper "Improving the Start-up Behavior
					 * of a Congestion Control Scheme for TCP,"
					 * which appeared in SIGCOMM'96.
					 *
					 * Save highest seq no we have sent so far.
					 * Be careful about the invisible FIN byte.
					 */
					if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
					    (tcp->tcp_unsent == 0)) {
						tcp->tcp_rexmit_max = tcp->tcp_fss;
					} else {
						tcp->tcp_rexmit_max = tcp->tcp_snxt;
					}

					/*
					 * Do not allow bursty traffic during
					 * fast recovery. Refer to Fall and Floyd's
					 * paper "Simulation-based Comparisons of
					 * Tahoe, Reno and SACK TCP" (in CCR).
					 * This is a best current practice.
					 */
					tcp->tcp_snd_burst = TCP_CWND_SS;
					/*
					 * Calculate tcp_pipe, which is the
					 * estimated number of bytes in the
					 * network.
					 *
					 * tcp_fack is the highest sack'ed seq num
					 * TCP has received.
					 *
					 * tcp_pipe is explained in the above quoted
					 * Fall and Floyd's paper. tcp_fack is
					 * explained in Mathis and Mahdavi's
					 * "Forward Acknowledgment: Refining TCP
					 * Congestion Control" in SIGCOMM '96.
					 */
					if (tcp->tcp_snd_sack_ok) {
						if (tcp->tcp_notsack_list != NULL) {
							tcp->tcp_pipe =
							    tcp->tcp_snxt -
							    tcp->tcp_fack;
							tcp->tcp_sack_snxt =
							    seg_ack;
							flags |=
							    TH_NEED_SACK_REXMIT;
						} else {
							/*
							 * Always initialize tcp_pipe
							 * even though we don't have
							 * any SACK info. If later
							 * we get SACK info and
							 * tcp_pipe is not initialized,
							 * funny things will happen.
							 */
							tcp->tcp_pipe =
							    tcp->tcp_cwnd_ssthresh;
						}
					} else {
						flags |= TH_REXMIT_NEEDED;
					} /* tcp_snd_sack_ok */
				} else {
					/*
					 * Here we perform congestion
					 * avoidance, but NOT slow start.
					 * This is known as the Fast
					 * Recovery Algorithm.
					 */
					if (tcp->tcp_snd_sack_ok &&
					    tcp->tcp_notsack_list != NULL) {
						flags |= TH_NEED_SACK_REXMIT;
						tcp->tcp_pipe -= mss;
						if (tcp->tcp_pipe < 0)
							tcp->tcp_pipe = 0;
					} else {
						/*
						 * We know that one more packet has
						 * left the pipe thus we can update
						 * cwnd.
						 */
						cwnd = tcp->tcp_cwnd + mss;
						if (cwnd > tcp->tcp_cwnd_max)
							cwnd = tcp->tcp_cwnd_max;
						tcp->tcp_cwnd = cwnd;
						if (tcp->tcp_unsent > 0)
							flags |= TH_XMIT_NEEDED;
					}
				}
			}
		}
	} else if (tcp->tcp_zero_win_probe) {
		/*
		 * If the window has opened, need to arrange
		 * to send additional data.
		 */
		if (new_swnd != 0) {
			/* tcp_suna != tcp_snxt */
			/* Packet contains a window update */
			TCPS_BUMP_MIB(tcps, tcpInWinUpdate);
			tcp->tcp_zero_win_probe = 0;
			tcp->tcp_timer_backoff = 0;
			tcp->tcp_ms_we_have_waited = 0;

			/*
			 * Transmit starting with tcp_suna since
			 * the one byte probe is not ack'ed.
			 * If TCP has sent more than one identical
			 * probe, tcp_rexmit will be set. That means
			 * tcp_ss_rexmit() will send out the one
			 * byte along with new data. Otherwise,
			 * fake the retransmission.
			 */
			flags |= TH_XMIT_NEEDED;
			if (!tcp->tcp_rexmit) {
				tcp->tcp_rexmit = B_TRUE;
				tcp->tcp_dupack_cnt = 0;
				tcp->tcp_rexmit_nxt = tcp->tcp_suna;
				tcp->tcp_rexmit_max = tcp->tcp_suna + 1;
			}
		}
	}

	/*
	 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
	 * If the ACK value acks something that we have not yet sent, it might
	 * be an old duplicate segment.  Send an ACK to re-synchronize the
	 * other side.
	 *
	 * Note: reset in response to unacceptable ACK in SYN_RECEIVE
	 * state is handled above, so we can always just drop the segment and
	 * send an ACK here.
	 *
	 * In the case where the peer shrinks the window, we see the new window
	 * update, but all the data sent previously is queued up by the peer.
	 * To account for this, in tcp_process_shrunk_swnd(), the sequence
	 * number, which was already sent, and within window, is recorded.
	 * tcp_snxt is then updated.
	 *
	 * If the window has previously shrunk, and an ACK for data not yet
	 * sent, according to tcp_snxt, is received, it may still be valid.  If
	 * the ACK is for data within the window at the time the window was
	 * shrunk, then the ACK is acceptable.  In this case tcp_snxt is set to
	 * the sequence number ACK'ed.
	 *
	 * If the ACK covers all the data sent at the time the window was
	 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE.
	 *
	 * Should we send ACKs in response to ACK only segments?
	 */

	if (SEQ_GT(seg_ack, tcp->tcp_snxt)) {
		if ((tcp->tcp_is_wnd_shrnk) &&
		    (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) {
			uint32_t data_acked_ahead_snxt;

			data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt;
			tcp_update_xmit_tail(tcp, seg_ack);
			tcp->tcp_unsent -= data_acked_ahead_snxt;
		} else {
			TCPS_BUMP_MIB(tcps, tcpInAckUnsent);
			/* drop the received segment */
			freemsg(mp);

			/*
			 * Send back an ACK.  If tcp_drop_ack_unsent_cnt is
			 * greater than 0, check if the number of such
			 * bogus ACKs is greater than that count.  If yes,
			 * don't send back any ACK.  This prevents TCP from
			 * getting into an ACK storm if somehow an attacker
			 * successfully spoofs an acceptable segment to our
			 * peer.  If this continues (count > 2 X threshold),
			 * we should abort this connection.
			 */
			if (tcp_drop_ack_unsent_cnt > 0 &&
			    ++tcp->tcp_in_ack_unsent >
			    tcp_drop_ack_unsent_cnt) {
				TCP_STAT(tcps, tcp_in_ack_unsent_drop);
				if (tcp->tcp_in_ack_unsent > 2 *
				    tcp_drop_ack_unsent_cnt) {
					(void) tcp_clean_death(tcp, EPROTO);
				}
				return;
			}
			mp = tcp_ack_mp(tcp);
			if (mp != NULL) {
				BUMP_LOCAL(tcp->tcp_obsegs);
				TCPS_BUMP_MIB(tcps, tcpOutAck);
				tcp_send_data(tcp, mp);
			}
			return;
		}
	} else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack,
	    tcp->tcp_snxt_shrunk)) {
		tcp->tcp_is_wnd_shrnk = B_FALSE;
	}
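
	/*
	 * Illustrative numbers for the ACK-storm throttle above: with
	 * tcp_drop_ack_unsent_cnt set to 10, the 11th consecutive ACK for
	 * data we never sent is silently dropped, and once the count
	 * exceeds 20 (2 x the threshold) the connection is aborted with
	 * EPROTO.
	 */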

	/*
	 * TCP gets a new ACK, update the notsack'ed list to delete those
	 * blocks that are covered by this ACK.
	 */
	if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
		tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
		    &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
	}

	/*
	 * If we got an ACK after fast retransmit, check to see
	 * if it is a partial ACK.  If it is not and the congestion
	 * window was inflated to account for the other side's
	 * cached packets, retract it.  If it is, do Hoe's algorithm.
	 */
	if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
		ASSERT(tcp->tcp_rexmit == B_FALSE);
		if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
			tcp->tcp_dupack_cnt = 0;
			/*
			 * Restore the orig tcp_cwnd_ssthresh after
			 * fast retransmit phase.
			 */
			if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
			}
			tcp->tcp_rexmit_max = seg_ack;
			tcp->tcp_cwnd_cnt = 0;
			tcp->tcp_snd_burst = tcp->tcp_localnet ?
			    TCP_CWND_INFINITE : TCP_CWND_NORMAL;

			/*
			 * Remove all notsack info to avoid confusion with
			 * the next fast retransmit/recovery phase.
			 */
			if (tcp->tcp_snd_sack_ok) {
				TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
				    tcp);
			}
		} else {
			if (tcp->tcp_snd_sack_ok &&
			    tcp->tcp_notsack_list != NULL) {
				flags |= TH_NEED_SACK_REXMIT;
				tcp->tcp_pipe -= mss;
				if (tcp->tcp_pipe < 0)
					tcp->tcp_pipe = 0;
			} else {
				/*
				 * Retransmit the unack'ed segment and
				 * restart fast recovery.  Note that we
				 * need to scale back tcp_cwnd to the
				 * original value when we started fast
				 * recovery.  This is to prevent overly
				 * aggressive behaviour in sending new
				 * segments.
				 */
				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
				    tcps->tcps_dupack_fast_retransmit * mss;
				tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
				flags |= TH_REXMIT_NEEDED;
			}
		}
	} else {
		tcp->tcp_dupack_cnt = 0;
		if (tcp->tcp_rexmit) {
			/*
			 * TCP is retransmitting.  If the ACK ack's all
			 * outstanding data, update tcp_rexmit_max and
			 * tcp_rexmit_nxt.  Otherwise, update tcp_rexmit_nxt
			 * to the correct value.
			 *
			 * Note that SEQ_LEQ() is used.  This is to avoid
			 * unnecessary fast retransmit caused by dup ACKs
			 * received when TCP does slow start retransmission
			 * after a time out.  During this phase, TCP may
			 * send out segments which are already received.
			 * This causes dup ACKs to be sent back.
			 */
			if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
				if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
					tcp->tcp_rexmit_nxt = seg_ack;
				}
				if (seg_ack != tcp->tcp_rexmit_max) {
					flags |= TH_XMIT_NEEDED;
				}
			} else {
				tcp->tcp_rexmit = B_FALSE;
				tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
				tcp->tcp_snd_burst = tcp->tcp_localnet ?
				    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
			}
			tcp->tcp_ms_we_have_waited = 0;
		}
	}
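
	/*
	 * To restate the partial ACK handling above (illustration only):
	 * an ACK at or beyond tcp_rexmit_max ends the recovery episode and
	 * deflates tcp_cwnd back to tcp_cwnd_ssthresh, while a partial ACK
	 * keeps recovery going, either by letting the SACK scoreboard drive
	 * further retransmission (TH_NEED_SACK_REXMIT) or, in the non-SACK
	 * case, by immediately retransmitting the next unacked segment
	 * (TH_REXMIT_NEEDED) with tcp_cwnd scaled back to
	 * tcp_cwnd_ssthresh plus the dup ACK allowance.
	 */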

	TCPS_BUMP_MIB(tcps, tcpInAckSegs);
	TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked);
	tcp->tcp_suna = seg_ack;
	if (tcp->tcp_zero_win_probe != 0) {
		tcp->tcp_zero_win_probe = 0;
		tcp->tcp_timer_backoff = 0;
	}

	/*
	 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
	 * Note that it cannot be the SYN being ack'ed.  The code flow
	 * will not reach here.
	 */
	if (mp1 == NULL) {
		goto fin_acked;
	}

	/*
	 * Update the congestion window.
	 *
	 * If TCP is not ECN capable or TCP is ECN capable but the
	 * congestion experience bit is not set, increase the tcp_cwnd as
	 * usual.
	 */
	if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
		cwnd = tcp->tcp_cwnd;
		add = mss;

		if (cwnd >= tcp->tcp_cwnd_ssthresh) {
			/*
			 * This is to prevent an increase of less than 1 MSS of
			 * tcp_cwnd.  With partial increase, tcp_wput_data()
			 * may send out tinygrams in order to preserve mblk
			 * boundaries.
			 *
			 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
			 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
			 * increased by 1 MSS for every RTT.
			 */
			if (tcp->tcp_cwnd_cnt <= 0) {
				tcp->tcp_cwnd_cnt = cwnd + add;
			} else {
				tcp->tcp_cwnd_cnt -= add;
				add = 0;
			}
		}
		tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
	}
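
	/*
	 * Illustrative pacing numbers for the congestion avoidance branch
	 * above: with cwnd at 10 MSS, tcp_cwnd_cnt is seeded with
	 * cwnd + mss (11 MSS) on the ACK that grows the window; each
	 * following ACK then subtracts one mss without growing cwnd, so
	 * cwnd gains exactly 1 MSS per roughly one window's worth of ACKs,
	 * i.e. about once per RTT, rather than fractionally on every ACK.
	 */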

	/* See if the latest urgent data has been acknowledged */
	if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
	    SEQ_GT(seg_ack, tcp->tcp_urg))
		tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/* Can we update the RTT estimates? */
	if (tcp->tcp_snd_ts_ok) {
		/* Ignore zero timestamp echo-reply. */
		if (tcpopt.tcp_opt_ts_ecr != 0) {
			tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
			    (int32_t)tcpopt.tcp_opt_ts_ecr);
		}

		/* If needed, restart the timer. */
		if (tcp->tcp_set_timer == 1) {
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
			tcp->tcp_set_timer = 0;
		}
		/*
		 * Update tcp_csuna in case the other side stops sending
		 * us timestamps.
		 */
		tcp->tcp_csuna = tcp->tcp_snxt;
	} else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
		/*
		 * An ACK sequence we haven't seen before, so get the RTT
		 * and update the RTO.  But first check if the timestamp is
		 * valid to use.
		 */
		if ((mp1->b_next != NULL) &&
		    SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
			tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
			    (int32_t)(intptr_t)mp1->b_prev);
		else
			TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);

		/* Remember the last sequence to be ACKed */
		tcp->tcp_csuna = seg_ack;
		if (tcp->tcp_set_timer == 1) {
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
			tcp->tcp_set_timer = 0;
		}
	} else {
		TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);
	}
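
	/*
	 * A note on the bookkeeping used above: for mblks on the transmit
	 * list, b_prev is overloaded to hold the lbolt timestamp taken when
	 * the block was (re)sent, and b_next is overloaded to hold the
	 * sequence number that must be acknowledged before that timestamp
	 * can safely be used for an RTT sample.  Neither field points at an
	 * mblk while a block sits on this list.
	 */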

	/* Eat acknowledged bytes off the xmit queue. */
	for (;;) {
		mblk_t	*mp2;
		uchar_t	*wptr;

		wptr = mp1->b_wptr;
		ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
		bytes_acked -= (int)(wptr - mp1->b_rptr);
		if (bytes_acked < 0) {
			mp1->b_rptr = wptr + bytes_acked;
			/*
			 * Set a new timestamp if all the bytes timed by the
			 * old timestamp have been ack'ed.
			 */
			if (SEQ_GT(seg_ack,
			    (uint32_t)(uintptr_t)(mp1->b_next))) {
				mp1->b_prev =
				    (mblk_t *)(uintptr_t)LBOLT_FASTPATH;
				mp1->b_next = NULL;
			}
			break;
		}
		mp1->b_next = NULL;
		mp1->b_prev = NULL;
		mp2 = mp1;
		mp1 = mp1->b_cont;

		/*
		 * This notification is required for some zero-copy
		 * clients to maintain a copy semantic.  After the data
		 * is ack'ed, the client is safe to modify or reuse the
		 * buffer.
		 */
		if (tcp->tcp_snd_zcopy_aware &&
		    (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
			tcp_zcopy_notify(tcp);
		freeb(mp2);
		if (bytes_acked == 0) {
			if (mp1 == NULL) {
				/* Everything is ack'ed, clear the tail. */
				tcp->tcp_xmit_tail = NULL;
				/*
				 * Cancel the timer unless we are still
				 * waiting for an ACK for the FIN packet.
				 */
				if (tcp->tcp_timer_tid != 0 &&
				    tcp->tcp_snxt == tcp->tcp_suna) {
					(void) TCP_TIMER_CANCEL(tcp,
					    tcp->tcp_timer_tid);
					tcp->tcp_timer_tid = 0;
				}
				goto pre_swnd_update;
			}
			if (mp2 != tcp->tcp_xmit_tail)
				break;
			tcp->tcp_xmit_tail = mp1;
			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
			    (uintptr_t)INT_MAX);
			tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
			    mp1->b_rptr);
			break;
		}
		if (mp1 == NULL) {
			/*
			 * More was acked but there is nothing more
			 * outstanding.  This means that the FIN was
			 * just acked or that we're talking to a clown.
			 */
fin_acked:
			ASSERT(tcp->tcp_fin_sent);
			tcp->tcp_xmit_tail = NULL;
			if (tcp->tcp_fin_sent) {
				/* FIN was acked - making progress */
				if (!tcp->tcp_fin_acked)
					tcp->tcp_ip_forward_progress = B_TRUE;
				tcp->tcp_fin_acked = B_TRUE;
				if (tcp->tcp_linger_tid != 0 &&
				    TCP_TIMER_CANCEL(tcp,
				    tcp->tcp_linger_tid) >= 0) {
					tcp_stop_lingering(tcp);
					freemsg(mp);
					mp = NULL;
				}
			} else {
				/*
				 * We should never get here because
				 * we have already checked that the
				 * number of bytes ack'ed should be
				 * smaller than or equal to what we
				 * have sent so far (it is the
				 * acceptability check of the ACK).
				 * We can only get here if the send
				 * queue is corrupted.
				 *
				 * Terminate the connection and
				 * panic the system.  It is better
				 * for us to panic instead of
				 * continuing to avoid other disasters.
				 */
				tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
				    tcp->tcp_rnxt, TH_RST|TH_ACK);
				panic("Memory corruption "
				    "detected for connection %s.",
				    tcp_display(tcp, NULL,
				    DISP_ADDR_AND_PORT));
				/*NOTREACHED*/
			}
			goto pre_swnd_update;
		}
		ASSERT(mp2 != tcp->tcp_xmit_tail);
	}
	if (tcp->tcp_unsent) {
		flags |= TH_XMIT_NEEDED;
	}
pre_swnd_update:
	tcp->tcp_xmit_head = mp1;

	/*
	 * The following check is different from most other implementations.
	 * For bi-directional transfer, when segments are dropped, the
	 * "normal" check will not accept a window update in those
	 * retransmitted segments.  Failing to do that, TCP may send out
	 * segments which are outside the receiver's window.  As TCP accepts
	 * the ack in those retransmitted segments, if the window update in
	 * the same segment is not accepted, TCP will incorrectly calculate
	 * that it can send more segments.  This can create a deadlock
	 * with the receiver if its window becomes zero.
	 */
	if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
	    SEQ_LT(tcp->tcp_swl1, seg_seq) ||
	    (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
		/*
		 * The criteria for update are:
		 *
		 * 1. the segment acknowledges some data.  Or
		 * 2. the segment is new, i.e. it has a higher seq num.  Or
		 * 3. the segment is not old and the advertised window is
		 * larger than the previous advertised window.
		 */
		if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
			flags |= TH_XMIT_NEEDED;
		tcp->tcp_swnd = new_swnd;
		if (new_swnd > tcp->tcp_max_swnd)
			tcp->tcp_max_swnd = new_swnd;
		tcp->tcp_swl1 = seg_seq;
		tcp->tcp_swl2 = seg_ack;
	}

est:
	if (tcp->tcp_state > TCPS_ESTABLISHED) {

		switch (tcp->tcp_state) {
		case TCPS_FIN_WAIT_1:
			if (tcp->tcp_fin_acked) {
				tcp->tcp_state = TCPS_FIN_WAIT_2;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_FIN_WAIT_1);
				/*
				 * We implement the non-standard BSD/SunOS
				 * FIN_WAIT_2 flushing algorithm.
				 * If there is no user attached to this
				 * TCP endpoint, then this TCP struct
				 * could hang around forever in FIN_WAIT_2
				 * state if the peer forgets to send us
				 * a FIN.  To prevent this, we wait only
				 * 2*MSL (a convenient time value) for
				 * the FIN to arrive.  If it doesn't show up,
				 * we flush the TCP endpoint.  This algorithm,
				 * though a violation of RFC-793, has worked
				 * for over 10 years in BSD systems.
				 * Note: SunOS 4.x waits 675 seconds before
				 * flushing the FIN_WAIT_2 connection.
				 */
				TCP_TIMER_RESTART(tcp,
				    tcp->tcp_fin_wait_2_flush_interval);
			}
			break;
		case TCPS_FIN_WAIT_2:
			break;	/* Shutdown hook? */
		case TCPS_LAST_ACK:
			freemsg(mp);
			if (tcp->tcp_fin_acked) {
				(void) tcp_clean_death(tcp, 0);
				return;
			}
			goto xmit_check;
		case TCPS_CLOSING:
			if (tcp->tcp_fin_acked) {
				SET_TIME_WAIT(tcps, tcp, connp);
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa, void,
				    NULL, tcp_t *, tcp, void, NULL, int32_t,
				    TCPS_CLOSING);
			}
			/* FALLTHRU */
		case TCPS_CLOSE_WAIT:
			freemsg(mp);
			goto xmit_check;
		default:
			ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
			break;
		}
	}
	if (flags & TH_FIN) {
		/* Make sure we ack the fin */
		flags |= TH_ACK_NEEDED;
		if (!tcp->tcp_fin_rcvd) {
			tcp->tcp_fin_rcvd = B_TRUE;
			tcp->tcp_rnxt++;
			tcpha = tcp->tcp_tcpha;
			tcpha->tha_ack = htonl(tcp->tcp_rnxt);

			/*
			 * Generate the ordrel_ind at the end unless the
			 * conn is detached or it is a STREAMS based eager.
			 * In the eager case we defer the notification until
			 * tcp_accept_finish has run.
			 */
			if (!TCP_IS_DETACHED(tcp) && (IPCL_IS_NONSTR(connp) ||
			    (tcp->tcp_listener == NULL &&
			    !tcp->tcp_hard_binding)))
				flags |= TH_ORDREL_NEEDED;
			switch (tcp->tcp_state) {
			case TCPS_SYN_RCVD:
				tcp->tcp_state = TCPS_CLOSE_WAIT;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_SYN_RCVD);
				/* Keepalive? */
				break;
			case TCPS_ESTABLISHED:
				tcp->tcp_state = TCPS_CLOSE_WAIT;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_ESTABLISHED);
				/* Keepalive? */
				break;
			case TCPS_FIN_WAIT_1:
				if (!tcp->tcp_fin_acked) {
					tcp->tcp_state = TCPS_CLOSING;
					DTRACE_TCP6(state__change, void, NULL,
					    ip_xmit_attr_t *, connp->conn_ixa,
					    void, NULL, tcp_t *, tcp, void,
					    NULL, int32_t, TCPS_FIN_WAIT_1);
					break;
				}
				/* FALLTHRU */
			case TCPS_FIN_WAIT_2:
				SET_TIME_WAIT(tcps, tcp, connp);
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa, void,
				    NULL, tcp_t *, tcp, void, NULL, int32_t,
				    TCPS_FIN_WAIT_2);
				if (seg_len) {
					/*
					 * implies data piggybacked on FIN.
					 * break to handle data.
					 */
					break;
				}
				freemsg(mp);
				goto ack_check;
			}
		}
	}

	if (mp->b_rptr == mp->b_wptr) {
		/*
		 * The header has been consumed, so we remove the
		 * zero-length mblk here.
		 */
		mp1 = mp;
		mp = mp->b_cont;
		freeb(mp1);
	}

	tcpha = tcp->tcp_tcpha;
	tcp->tcp_rack_cnt++;
	{
		uint32_t cur_max;

		cur_max = tcp->tcp_rack_cur_max;
		if (tcp->tcp_rack_cnt >= cur_max) {
			/*
			 * We have more unacked data than we should - send
			 * an ACK now.
			 */
			flags |= TH_ACK_NEEDED;
			cur_max++;
			if (cur_max > tcp->tcp_rack_abs_max)
				tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
			else
				tcp->tcp_rack_cur_max = cur_max;
		} else if (TCP_IS_DETACHED(tcp)) {
			/* We don't have an ACK timer for detached TCP. */
			flags |= TH_ACK_NEEDED;
		} else if (seg_len < mss) {
			/*
			 * If we get a segment that is less than an mss, and we
			 * already have unacknowledged data, and the amount
			 * unacknowledged is not a multiple of mss, then we
			 * better generate an ACK now.  Otherwise, this may be
			 * the tail piece of a transaction, and we would rather
			 * wait for the response.
			 */
			int udif;

			ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
			    (uintptr_t)INT_MAX);
			udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
			if (udif && (udif % mss))
				flags |= TH_ACK_NEEDED;
			else
				flags |= TH_ACK_TIMER_NEEDED;
		} else {
			/* Start delayed ack timer */
			flags |= TH_ACK_TIMER_NEEDED;
		}
	}
	tcp->tcp_rnxt += seg_len;
	tcpha->tha_ack = htonl(tcp->tcp_rnxt);
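
	/*
	 * Illustration of the ACK policy above: with mss = 1460 and
	 * 2920 bytes unacknowledged (exactly 2 MSS), a short trailing
	 * segment only arms the delayed ACK timer, while an unaligned
	 * amount such as 2000 bytes (udif % mss != 0) forces an immediate
	 * ACK, on the theory that a non-MSS-sized tail usually ends a
	 * transaction that is waiting for a response.
	 */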

	/* Update SACK list */
	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
		tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
		    &(tcp->tcp_num_sack_blk));
	}

	if (tcp->tcp_urp_mp) {
		tcp->tcp_urp_mp->b_cont = mp;
		mp = tcp->tcp_urp_mp;
		tcp->tcp_urp_mp = NULL;
		/* Ready for a new signal. */
		tcp->tcp_urp_last_valid = B_FALSE;
#ifdef DEBUG
		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
		    "tcp_rput: sending exdata_ind %s",
		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
#endif /* DEBUG */
	}

	/*
	 * Check for ancillary data changes compared to last segment.
	 */
	if (connp->conn_recv_ancillary.crb_all != 0) {
		mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira);
		if (mp == NULL)
			return;
	}

	if (IPCL_IS_NONSTR(connp)) {
		/*
		 * Non-STREAMS socket
		 */
		boolean_t push = flags & (TH_PUSH|TH_FIN);
		int error;

		if ((*sockupcalls->su_recv)(connp->conn_upper_handle,
		    mp, seg_len, 0, &error, &push) <= 0) {
			/*
			 * We should never be in middle of a
			 * fallback, the squeue guarantees that.
			 */
			ASSERT(error != EOPNOTSUPP);
			if (error == ENOSPC)
				tcp->tcp_rwnd -= seg_len;
		} else if (push) {
			/* PUSH bit set and sockfs is not flow controlled */
			flags |= tcp_rwnd_reopen(tcp);
		}
	} else if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) {
		/*
		 * Side queue inbound data until the accept happens.
		 * tcp_accept/tcp_rput drains this when the accept happens.
		 * M_DATA is queued on b_cont.  Otherwise (T_OPTDATA_IND or
		 * T_EXDATA_IND) it is queued on b_next.
		 * XXX Make urgent data use this. Requires:
		 *	Removing tcp_listener check for TH_URG
		 *	Making M_PCPROTO and MARK messages skip the eager case
		 */

		tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
	} else {
		/* Active STREAMS socket */
		if (mp->b_datap->db_type != M_DATA ||
		    (flags & TH_MARKNEXT_NEEDED)) {
			if (tcp->tcp_rcv_list != NULL) {
				flags |= tcp_rcv_drain(tcp);
			}
			ASSERT(tcp->tcp_rcv_list == NULL ||
			    tcp->tcp_fused_sigurg);

			if (flags & TH_MARKNEXT_NEEDED) {
#ifdef DEBUG
				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
				    "tcp_rput: sending MSGMARKNEXT %s",
				    tcp_display(tcp, NULL,
				    DISP_PORT_ONLY));
#endif /* DEBUG */
				mp->b_flag |= MSGMARKNEXT;
				flags &= ~TH_MARKNEXT_NEEDED;
			}

			if (is_system_labeled())
				tcp_setcred_data(mp, ira);

			putnext(connp->conn_rq, mp);
			if (!canputnext(connp->conn_rq))
				tcp->tcp_rwnd -= seg_len;
		} else if ((flags & (TH_PUSH|TH_FIN)) ||
		    tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) {
			if (tcp->tcp_rcv_list != NULL) {
				/*
				 * Enqueue the new segment first and then
				 * call tcp_rcv_drain() to send all data
				 * up.  The other way to do this is to
				 * send all queued data up and then call
				 * putnext() to send the new segment up.
				 * This way can remove the else part later
				 * on.
				 *
				 * We don't do this to avoid one more call to
				 * canputnext() as tcp_rcv_drain() needs to
				 * call canputnext().
				 */
				tcp_rcv_enqueue(tcp, mp, seg_len,
				    ira->ira_cred);
				flags |= tcp_rcv_drain(tcp);
			} else {
				if (is_system_labeled())
					tcp_setcred_data(mp, ira);

				putnext(connp->conn_rq, mp);
				if (!canputnext(connp->conn_rq))
					tcp->tcp_rwnd -= seg_len;
			}
		} else {
			/*
			 * Enqueue all packets when processing an mblk
			 * from the co queue and also enqueue normal packets.
			 */
			tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
		}

		/*
		 * Make sure the timer is running if we have data waiting
		 * for a push bit.  This provides resiliency against
		 * implementations that do not correctly generate push bits.
		 */
		if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) {
			/*
			 * The connection may be closed at this point, so don't
			 * do anything for a detached tcp.
			 */
			if (!TCP_IS_DETACHED(tcp))
				tcp->tcp_push_tid = TCP_TIMER(tcp,
				    tcp_push_timer,
				    tcps->tcps_push_timer_interval);
		}
	}

xmit_check:
	/* Is there anything left to do? */
	ASSERT(!(flags & TH_MARKNEXT_NEEDED));
	if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED|
	    TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED|
	    TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
		goto done;

	/* Any transmit work to do and a non-zero window? */
	if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT|
	    TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) {
		if (flags & TH_REXMIT_NEEDED) {
			uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna;

			TCPS_BUMP_MIB(tcps, tcpOutFastRetrans);

			if (snd_size > tcp->tcp_swnd)
				snd_size = tcp->tcp_swnd;
			mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size,
			    NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size,
			    B_TRUE);
			if (mp1 != NULL) {
				tcp->tcp_xmit_head->b_prev =
				    (mblk_t *)LBOLT_FASTPATH;
				tcp->tcp_csuna = tcp->tcp_snxt;
				TCPS_BUMP_MIB(tcps, tcpRetransSegs);
				TCPS_UPDATE_MIB(tcps, tcpRetransBytes,
				    snd_size);
				tcp_send_data(tcp, mp1);
			}
		}
		if (flags & TH_NEED_SACK_REXMIT) {
			tcp_sack_rexmit(tcp, &flags);
		}
		/*
		 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send
		 * out new segments.  Note that tcp_rexmit should not be
		 * set, otherwise TH_LIMIT_XMIT should not be set.
		 */
		if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) {
			if (!tcp->tcp_rexmit) {
				tcp_wput_data(tcp, NULL, B_FALSE);
			} else {
				tcp_ss_rexmit(tcp);
			}
		}
		/*
		 * Adjust tcp_cwnd back to normal value after sending
		 * new data segments.
		 */
		if (flags & TH_LIMIT_XMIT) {
			tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1);
			/*
			 * This will restart the timer.  Restarting the
			 * timer is used to avoid a timeout before the
			 * limited transmitted segment's ACK gets back.
			 */
			if (tcp->tcp_xmit_head != NULL)
				tcp->tcp_xmit_head->b_prev =
				    (mblk_t *)LBOLT_FASTPATH;
		}

		/* Anything more to do? */
		if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED|
		    TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
			goto done;
	}
ack_check:
	if (flags & TH_SEND_URP_MARK) {
		ASSERT(tcp->tcp_urp_mark_mp);
		ASSERT(!IPCL_IS_NONSTR(connp));
		/*
		 * Send up any queued data and then send the mark message
		 */
		if (tcp->tcp_rcv_list != NULL) {
			flags |= tcp_rcv_drain(tcp);
		}
		ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
		mp1 = tcp->tcp_urp_mark_mp;
		tcp->tcp_urp_mark_mp = NULL;
		if (is_system_labeled())
			tcp_setcred_data(mp1, ira);

		putnext(connp->conn_rq, mp1);
#ifdef DEBUG
		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
		    "tcp_rput: sending zero-length %s %s",
		    ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" :
		    "MSGNOTMARKNEXT"),
		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
#endif /* DEBUG */
		flags &= ~TH_SEND_URP_MARK;
	}
	if (flags & TH_ACK_NEEDED) {
		/*
		 * Time to send an ack for some reason.
		 */
		mp1 = tcp_ack_mp(tcp);
		if (mp1 != NULL) {
			tcp_send_data(tcp, mp1);
			BUMP_LOCAL(tcp->tcp_obsegs);
			TCPS_BUMP_MIB(tcps, tcpOutAck);
		}
		if (tcp->tcp_ack_tid != 0) {
			(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid);
			tcp->tcp_ack_tid = 0;
		}
	}
	if (flags & TH_ACK_TIMER_NEEDED) {
		/*
		 * Arrange for deferred ACK or push wait timeout.
		 * Start timer if it is not already running.
		 */
		if (tcp->tcp_ack_tid == 0) {
			tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer,
			    tcp->tcp_localnet ?
			    tcps->tcps_local_dack_interval :
			    tcps->tcps_deferred_ack_interval);
		}
	}
	if (flags & TH_ORDREL_NEEDED) {
		/*
		 * Notify upper layer about an orderly release.  If this is
		 * a non-STREAMS socket, then just make an upcall.  For STREAMS
		 * we send up an ordrel_ind, unless this is an eager, in which
		 * case the ordrel will be sent when tcp_accept_finish runs.
		 * Note that for non-STREAMS we make an upcall even if it is an
		 * eager, because we have an upper handle to send it to.
		 */
		ASSERT(IPCL_IS_NONSTR(connp) || tcp->tcp_listener == NULL);
		ASSERT(!tcp->tcp_detached);

		if (IPCL_IS_NONSTR(connp)) {
			ASSERT(tcp->tcp_ordrel_mp == NULL);
			tcp->tcp_ordrel_done = B_TRUE;
			(*sockupcalls->su_opctl)(connp->conn_upper_handle,
			    SOCK_OPCTL_SHUT_RECV, 0);
			goto done;
		}

		if (tcp->tcp_rcv_list != NULL) {
			/*
			 * Push any mblk(s) enqueued from co processing.
			 */
			flags |= tcp_rcv_drain(tcp);
		}
		ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);

		mp1 = tcp->tcp_ordrel_mp;
		tcp->tcp_ordrel_mp = NULL;
		tcp->tcp_ordrel_done = B_TRUE;
		putnext(connp->conn_rq, mp1);
	}
done:
	ASSERT(!(flags & TH_MARKNEXT_NEEDED));
}

/*
 * Attach ancillary data to a received TCP segment for the
 * ancillary pieces requested by the application that are
 * different than they were in the previous data segment.
 *
 * Save the "current" values once memory allocation is ok so that
 * when memory allocation fails we can just wait for the next data segment.
 */
static mblk_t *
tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp,
    ip_recv_attr_t *ira)
{
	struct T_optdata_ind *todi;
	int optlen;
	uchar_t *optptr;
	struct T_opthdr *toh;
	crb_t addflag;	/* Which pieces to add */
	mblk_t *mp1;
	conn_t *connp = tcp->tcp_connp;

	optlen = 0;
	addflag.crb_all = 0;
	/* If app asked for pktinfo and the index has changed ... */
	if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
	    ira->ira_ruifindex != tcp->tcp_recvifindex) {
		optlen += sizeof (struct T_opthdr) +
		    sizeof (struct in6_pktinfo);
		addflag.crb_ip_recvpktinfo = 1;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
	    ipp->ipp_hoplimit != tcp->tcp_recvhops) {
		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
		addflag.crb_ipv6_recvhoplimit = 1;
	}
	/* If app asked for tclass and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
	    ipp->ipp_tclass != tcp->tcp_recvtclass) {
		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
		addflag.crb_ipv6_recvtclass = 1;
	}
	/*
	 * If app asked for hopbyhop headers and it has changed ...
	 * For security labels, note that (1) security labels can't change on
	 * a connected socket at all, (2) we're connected to at most one peer,
	 * (3) if anything changes, then it must be some other extra option.
	 */
	if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
	    ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen,
	    (ipp->ipp_fields & IPPF_HOPOPTS),
	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
		optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen;
		addflag.crb_ipv6_recvhopopts = 1;
		if (!ip_allocbuf((void **)&tcp->tcp_hopopts,
		    &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
			return (mp);
	}
	/* If app asked for dst headers before routing headers ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
	    ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen,
	    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
	    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
		optlen += sizeof (struct T_opthdr) +
		    ipp->ipp_rthdrdstoptslen;
		addflag.crb_ipv6_recvrthdrdstopts = 1;
		if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts,
		    &tcp->tcp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
			return (mp);
	}
	/* If app asked for routing headers and it has changed ... */
	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
	    ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen,
	    (ipp->ipp_fields & IPPF_RTHDR),
	    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
		optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen;
		addflag.crb_ipv6_recvrthdr = 1;
		if (!ip_allocbuf((void **)&tcp->tcp_rthdr,
		    &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
			return (mp);
	}
	/* If app asked for dest headers and it has changed ... */
	if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts ||
	    connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) &&
	    ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen,
	    (ipp->ipp_fields & IPPF_DSTOPTS),
	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
		optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen;
		addflag.crb_ipv6_recvdstopts = 1;
		if (!ip_allocbuf((void **)&tcp->tcp_dstopts,
		    &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
			return (mp);
	}

	if (optlen == 0) {
		/* Nothing to add */
		return (mp);
	}
	mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED);
	if (mp1 == NULL) {
		/*
		 * Defer sending ancillary data until the next TCP segment
		 * arrives.
		 */
		return (mp);
	}
	mp1->b_cont = mp;
	mp = mp1;
	mp->b_wptr += sizeof (*todi) + optlen;
	mp->b_datap->db_type = M_PROTO;
	todi = (struct T_optdata_ind *)mp->b_rptr;
	todi->PRIM_type = T_OPTDATA_IND;
	todi->DATA_flag = 1;	/* MORE data */
	todi->OPT_length = optlen;
	todi->OPT_offset = sizeof (*todi);
	optptr = (uchar_t *)&todi[1];
	/*
	 * If app asked for pktinfo and the index has changed ...
	 * Note that the local address never changes for the connection.
	 */
	if (addflag.crb_ip_recvpktinfo) {
		struct in6_pktinfo *pkti;
		uint_t ifindex;

		ifindex = ira->ira_ruifindex;
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_PKTINFO;
		toh->len = sizeof (*toh) + sizeof (*pkti);
		toh->status = 0;
		optptr += sizeof (*toh);
		pkti = (struct in6_pktinfo *)optptr;
		pkti->ipi6_addr = connp->conn_laddr_v6;
		pkti->ipi6_ifindex = ifindex;
		optptr += sizeof (*pkti);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		tcp->tcp_recvifindex = ifindex;
	}
	/* If app asked for hoplimit and it has changed ... */
	if (addflag.crb_ipv6_recvhoplimit) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_HOPLIMIT;
		toh->len = sizeof (*toh) + sizeof (uint_t);
		toh->status = 0;
		optptr += sizeof (*toh);
		*(uint_t *)optptr = ipp->ipp_hoplimit;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		tcp->tcp_recvhops = ipp->ipp_hoplimit;
	}
	/* If app asked for tclass and it has changed ... */
	if (addflag.crb_ipv6_recvtclass) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_TCLASS;
		toh->len = sizeof (*toh) + sizeof (uint_t);
		toh->status = 0;
		optptr += sizeof (*toh);
		*(uint_t *)optptr = ipp->ipp_tclass;
		optptr += sizeof (uint_t);
		ASSERT(OK_32PTR(optptr));
		/* Save as "last" value */
		tcp->tcp_recvtclass = ipp->ipp_tclass;
	}
	if (addflag.crb_ipv6_recvhopopts) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_HOPOPTS;
		toh->len = sizeof (*toh) + ipp->ipp_hopoptslen;
		toh->status = 0;
		optptr += sizeof (*toh);
		bcopy((uchar_t *)ipp->ipp_hopopts, optptr,
		    ipp->ipp_hopoptslen);
		optptr += ipp->ipp_hopoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen,
		    (ipp->ipp_fields & IPPF_HOPOPTS),
		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
	}
	if (addflag.crb_ipv6_recvrthdrdstopts) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_RTHDRDSTOPTS;
		toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen;
		toh->status = 0;
		optptr += sizeof (*toh);
		bcopy(ipp->ipp_rthdrdstopts, optptr,
		    ipp->ipp_rthdrdstoptslen);
		optptr += ipp->ipp_rthdrdstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&tcp->tcp_rthdrdstopts,
		    &tcp->tcp_rthdrdstoptslen,
		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
	}
	if (addflag.crb_ipv6_recvrthdr) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_RTHDR;
		toh->len = sizeof (*toh) + ipp->ipp_rthdrlen;
		toh->status = 0;
		optptr += sizeof (*toh);
		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
		optptr += ipp->ipp_rthdrlen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen,
		    (ipp->ipp_fields & IPPF_RTHDR),
		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
	}
	if (addflag.crb_ipv6_recvdstopts) {
		toh = (struct T_opthdr *)optptr;
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_DSTOPTS;
		toh->len = sizeof (*toh) + ipp->ipp_dstoptslen;
		toh->status = 0;
		optptr += sizeof (*toh);
		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
		optptr += ipp->ipp_dstoptslen;
		ASSERT(OK_32PTR(optptr));
		/* Save as last value */
		ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen,
		    (ipp->ipp_fields & IPPF_DSTOPTS),
		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
	}
	ASSERT(optptr == mp->b_wptr);
	return (mp);
}
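
/*
 * Layout of the message built above (illustration only):
 *
 *	M_PROTO mblk:  [ struct T_optdata_ind | T_opthdr + value |
 *	                 T_opthdr + value | ... ]
 *	b_cont:        the original data mblk chain
 *
 * so the changed ancillary items are delivered upstream together with,
 * and ahead of, the data segment they describe.
 */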

/* The minimum of smoothed mean deviation in RTO calculation. */
#define	TCP_SD_MIN	400

/*
 * Set RTO for this connection.  The formula is from Jacobson and Karels'
 * "Congestion Avoidance and Control" in SIGCOMM '88.  The variable names
 * are the same as those in Appendix A.2 of that paper.
 *
 * m = new measurement
 * sa = smoothed RTT average (8 * average estimates).
 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
 */
static void
tcp_set_rto(tcp_t *tcp, clock_t rtt)
{
	long m = TICK_TO_MSEC(rtt);
	clock_t sa = tcp->tcp_rtt_sa;
	clock_t sv = tcp->tcp_rtt_sd;
	clock_t rto;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	TCPS_BUMP_MIB(tcps, tcpRttUpdate);
	tcp->tcp_rtt_update++;

	/* tcp_rtt_sa is not 0 means this is a new sample. */
	if (sa != 0) {
		/*
		 * Update average estimator:
		 *	new rtt = 7/8 old rtt + 1/8 Error
		 */

		/* m is now Error in estimate. */
		m -= sa >> 3;
		if ((sa += m) <= 0) {
			/*
			 * Don't allow the smoothed average to be negative.
			 * We use 0 to denote reinitialization of the
			 * variables.
			 */
			sa = 1;
		}

		/*
		 * Update deviation estimator:
		 *	new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
		 */
		if (m < 0)
			m = -m;
		m -= sv >> 2;
		sv += m;
	} else {
		/*
		 * This follows BSD's implementation.  So the reinitialized
		 * RTO is 3 * m.  We cannot go less than 2 because if the
		 * link is bandwidth dominated, doubling the window size
		 * during slow start means doubling the RTT.  We want to be
		 * more conservative when we reinitialize our estimates.  3
		 * is just a convenient number.
		 */
		sa = m << 3;
		sv = m << 1;
	}
	if (sv < TCP_SD_MIN) {
		/*
		 * We do not know whether sa captures the delayed ACK
		 * effect, since in a long train of segments a receiver
		 * does not delay its ACKs.  So set the minimum of sv to
		 * be TCP_SD_MIN, which defaults to 400 ms, twice the BSD
		 * DATO.  That means the minimum of the mean deviation is
		 * 100 ms.
		 */
		sv = TCP_SD_MIN;
	}
	tcp->tcp_rtt_sa = sa;
	tcp->tcp_rtt_sd = sv;
	/*
	 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
	 *
	 * Add tcp_rexmit_interval_extra in case of an extreme environment
	 * where the algorithm fails to work.  The default value of
	 * tcp_rexmit_interval_extra should be 0.
	 *
	 * As we use a finer grained clock than BSD and update
	 * RTO for every ACK, add in another .25 of RTT to the
	 * deviation of RTO to accommodate burstiness of 1/4 of
	 * window size.
	 */
	rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5);

	TCP_SET_RTO(tcp, rto);

	/* Now, we can reset tcp_timer_backoff to use the new RTO... */
	tcp->tcp_timer_backoff = 0;
}
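
/*
 * Worked example of the fixed-point arithmetic above (illustrative
 * numbers only): with tcp_rtt_sa = 800 (8 times a 100 ms average) and
 * tcp_rtt_sd = 400 (the TCP_SD_MIN floor),
 *
 *	rto = (800 >> 3) + 400 + tcps_rexmit_interval_extra + (800 >> 5)
 *	    = 100 + 400 + 0 + 25 = 525 ms
 *
 * assuming the extra interval is left at its default of 0, before
 * TCP_SET_RTO() applies the stack's configured bounds.
 */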

/*
 * On a labeled system we have some protocols above TCP, such as RPC, which
 * appear to assume that every mblk in a chain has a db_credp.
 */
static void
tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira)
{
	ASSERT(is_system_labeled());
	ASSERT(ira->ira_cred != NULL);

	while (mp != NULL) {
		mblk_setcred(mp, ira->ira_cred, NOPID);
		mp = mp->b_cont;
	}
}

uint_t
tcp_rwnd_reopen(tcp_t *tcp)
{
	uint_t ret = 0;
	uint_t thwin;
	conn_t *connp = tcp->tcp_connp;

	/* Learn the latest rwnd information that we sent to the other side. */
	thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win))
	    << tcp->tcp_rcv_ws;
	/* This is peer's calculated send window (our receive window). */
	thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
	/*
	 * Increase the receive window to max.  But we need to do receiver
	 * SWS avoidance.  This means that we need to check that the increase
	 * of the receive window is at least 1 MSS.
	 */
	if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) {
		/*
		 * If the window that the other side knows is less than max
		 * deferred ACK segments, send an update immediately.
		 */
		if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
			TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate);
			ret = TH_ACK_NEEDED;
		}
		tcp->tcp_rwnd = connp->conn_rcvbuf;
	}
	return (ret);
}
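
/*
 * Illustrative numbers: with conn_rcvbuf = 64 KB, tcp_mss = 1460 and a
 * peer view of the window (thwin) of 60 KB, the window is reopened to
 * the full 64 KB since the gain is at least 1 MSS, but an explicit
 * window-update ACK (TH_ACK_NEEDED) is generated only when thwin has
 * dropped below tcp_rack_cur_max * tcp_mss; otherwise the update can
 * simply ride on the next regular ACK.
 */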

/*
 * Handle a packet that has been reclassified by TCP.
 * This function drops the ref on connp that the caller had.
 */
void
tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
{
	ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;

	if (connp->conn_incoming_ifindex != 0 &&
	    connp->conn_incoming_ifindex != ira->ira_ruifindex) {
		freemsg(mp);
		CONN_DEC_REF(connp);
		return;
	}

	if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) ||
	    (ira->ira_flags & IRAF_IPSEC_SECURE)) {
		ip6_t *ip6h;
		ipha_t *ipha;

		if (ira->ira_flags & IRAF_IS_IPV4) {
			ipha = (ipha_t *)mp->b_rptr;
			ip6h = NULL;
		} else {
			ipha = NULL;
			ip6h = (ip6_t *)mp->b_rptr;
		}
		mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira);
		if (mp == NULL) {
			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
			/* Note that mp is NULL */
			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
			CONN_DEC_REF(connp);
			return;
		}
	}

	if (IPCL_IS_TCP(connp)) {
		/*
		 * do not drain, certain use cases can blow
		 * the stack
		 */
		SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
		    connp->conn_recv, connp, ira,
		    SQ_NODRAIN, SQTAG_IP_TCP_INPUT);
	} else {
		/* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
		(connp->conn_recv)(connp, mp, NULL,
		    ira);
		CONN_DEC_REF(connp);
	}
}

/* ARGSUSED */
static void
tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t *connp = (conn_t *)arg;
	tcp_t *tcp = connp->conn_tcp;
	queue_t *q = connp->conn_rq;

	ASSERT(!IPCL_IS_NONSTR(connp));
	mutex_enter(&tcp->tcp_rsrv_mp_lock);
	tcp->tcp_rsrv_mp = mp;
	mutex_exit(&tcp->tcp_rsrv_mp_lock);

	if (TCP_IS_DETACHED(tcp) || q == NULL) {
		return;
	}

	if (tcp->tcp_fused) {
		tcp_fuse_backenable(tcp);
		return;
	}

	if (canputnext(q)) {
		/* Not flow-controlled, open rwnd */
		tcp->tcp_rwnd = connp->conn_rcvbuf;

		/*
		 * Send back a window update immediately if TCP is above
		 * ESTABLISHED state and the increase of the rcv window
		 * that the other side knows is at least 1 MSS after flow
		 * control is lifted.
		 */
		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
		    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
			tcp_xmit_ctl(NULL, tcp,
			    (tcp->tcp_swnd == 0) ? tcp->tcp_suna :
			    tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
		}
	}
}

/*
 * The read side service routine is called mostly when we get back-enabled as a
 * result of flow control relief.  Since we don't actually queue anything in
 * TCP, we have no data to send out of here.  What we do is clear the receive
 * window, and send out a window update.
 */
void
tcp_rsrv(queue_t *q)
{
	conn_t	*connp = Q_TO_CONN(q);
	tcp_t	*tcp = connp->conn_tcp;
	mblk_t	*mp;

	/* No code does a putq on the read side */
	ASSERT(q->q_first == NULL);

	/*
	 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already
	 * been run.  So just return.
	 */
	mutex_enter(&tcp->tcp_rsrv_mp_lock);
	if ((mp = tcp->tcp_rsrv_mp) == NULL) {
		mutex_exit(&tcp->tcp_rsrv_mp_lock);
		return;
	}
	tcp->tcp_rsrv_mp = NULL;
	mutex_exit(&tcp->tcp_rsrv_mp_lock);

	CONN_INC_REF(connp);
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp,
	    NULL, SQ_PROCESS, SQTAG_TCP_RSRV);
}

/* At minimum we need 8 bytes in the TCP header for the lookup */
#define	ICMP_MIN_TCP_HDR	8

/*
 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages
 * passed up by IP.  The message is always received on the correct tcp_t.
 * Assumes that IP has pulled up everything up to and including the ICMP
 * header.
 */
/* ARGSUSED */
void
tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t		*connp = (conn_t *)arg1;
	icmph_t		*icmph;
	ipha_t		*ipha;
	int		iph_hdr_length;
	tcpha_t		*tcpha;
	uint32_t	seg_seq;
	tcp_t		*tcp = connp->conn_tcp;

	/* Assume IP provides aligned packets */
	ASSERT(OK_32PTR(mp->b_rptr));
	ASSERT((MBLKL(mp) >= sizeof (ipha_t)));

	/*
	 * It's possible we have a closed, but not yet destroyed, TCP
	 * connection.  Several fields (e.g. conn_ixa->ixa_ire) are invalid
	 * in the closed state, so don't take any chances and drop the packet.
	 */
	if (tcp->tcp_state == TCPS_CLOSED) {
		freemsg(mp);
		return;
	}

	/*
	 * Verify IP version.  Anything other than an IPv4 or IPv6 packet is
	 * sent upstream.  ICMPv6 is handled in tcp_icmp_error_ipv6.
	 */
	if (!(ira->ira_flags & IRAF_IS_IPV4)) {
		tcp_icmp_error_ipv6(tcp, mp, ira);
		return;
	}

	/* Skip past the outer IP and ICMP headers */
	iph_hdr_length = ira->ira_ip_hdr_length;
	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
	/*
	 * If we don't have the correct outer IP header length
	 * or if we don't have a complete inner IP header,
	 * drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
noticmpv4:
		freemsg(mp);
		return;
	}
	ipha = (ipha_t *)&icmph[1];

	/* Skip past the inner IP and find the ULP header */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length);
	/*
	 * If we don't have the correct inner IP header length or if the ULP
	 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR
	 * bytes of TCP header, drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    ipha->ipha_protocol != IPPROTO_TCP ||
	    (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) {
		goto noticmpv4;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmph->icmph_type) {
	case ICMP_DEST_UNREACHABLE:
		switch (icmph->icmph_code) {
		case ICMP_FRAGMENTATION_NEEDED:
			/*
			 * Update Path MTU, then try to send something out.
			 */
			tcp_update_pmtu(tcp, B_TRUE);
			tcp_rexmit_after_error(tcp);
			break;
		case ICMP_PORT_UNREACHABLE:
		case ICMP_PROTOCOL_UNREACHABLE:
			switch (tcp->tcp_state) {
			case TCPS_SYN_SENT:
			case TCPS_SYN_RCVD:
				/*
				 * ICMP can snipe away incipient
				 * TCP connections as long as
				 * seq number is same as initial
				 * send seq number.
				 */
				if (seg_seq == tcp->tcp_iss) {
					(void) tcp_clean_death(tcp,
					    ECONNREFUSED);
				}
				break;
			}
			break;
		case ICMP_HOST_UNREACHABLE:
		case ICMP_NET_UNREACHABLE:
			/* Record the error in case we finally time out. */
			if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
				tcp->tcp_client_errno = EHOSTUNREACH;
			else
				tcp->tcp_client_errno = ENETUNREACH;
			if (tcp->tcp_state == TCPS_SYN_RCVD) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP_SOURCE_QUENCH: {
		/*
		 * use a global boolean to control
		 * whether TCP should respond to ICMP_SOURCE_QUENCH.
		 * The default is false.
		 */
		if (tcp_icmp_source_quench) {
			/*
			 * Reduce the sending rate as if we got a
			 * retransmit timeout
			 */
			uint32_t npkt;

			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
			    tcp->tcp_mss;
			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
			tcp->tcp_cwnd = tcp->tcp_mss;
			tcp->tcp_cwnd_cnt = 0;
		}
		break;
	}
	}
	freemsg(mp);
}

/*
 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
{
	icmp6_t		*icmp6;
	ip6_t		*ip6h;
	uint16_t	iph_hdr_length = ira->ira_ip_hdr_length;
	tcpha_t		*tcpha;
	uint8_t		*nexthdrp;
	uint32_t	seg_seq;

	/*
	 * Verify that we have a complete IP header.
	 */
	ASSERT((MBLKL(mp) >= sizeof (ip6_t)));

	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify if we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
noticmpv6:
		freemsg(mp);
		return;
	}

	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
		goto noticmpv6;
	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
	/*
	 * Validate inner header.  If the ULP is not IPPROTO_TCP or if we
	 * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop
	 * the packet.
	 */
	if ((*nexthdrp != IPPROTO_TCP) ||
	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
		goto noticmpv6;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmp6->icmp6_type) {
	case ICMP6_PACKET_TOO_BIG:
		/*
		 * Update Path MTU, then try to send something out.
		 */
		tcp_update_pmtu(tcp, B_TRUE);
		tcp_rexmit_after_error(tcp);
		break;
	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Record the error in case we finally time out. */
			tcp->tcp_client_errno = EHOSTUNREACH;
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			if (tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
		}
		break;
	case ICMP6_TIME_EXCEEDED:
	default:
		break;
	}
	freemsg(mp);
}

/*
 * CALLED OUTSIDE OF SQUEUE! It can not follow any pointers that tcp might
 * change.  But it can refer to fields like tcp_suna and tcp_snxt.
 *
 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
 * error messages received by IP.  The message is always received on the
 * correct tcp_t.
 */
/* ARGSUSED */
boolean_t
tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
    ip_recv_attr_t *ira)
{
	tcpha_t *tcpha = (tcpha_t *)arg2;
	uint32_t seq = ntohl(tcpha->tha_seq);
	tcp_t *tcp = connp->conn_tcp;

	/*
	 * TCP sequence number contained in payload of the ICMP error message
	 * should be within the range SND.UNA <= SEG.SEQ < SND.NXT.  Otherwise,
	 * the message is either a stale ICMP error, or an attack from the
	 * network.  Fail the verification.
	 */
	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
		return (B_FALSE);

	/* For "too big" we also check the ignore flag */
	if (ira->ira_flags & IRAF_IS_IPV4) {
		ASSERT(icmph != NULL);
		if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
		    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	} else {
		ASSERT(icmp6 != NULL);
		if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	}
	return (B_TRUE);
}