/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/* This file contains all TCP TLI/TPI related functions */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/proto_set.h>
static void	tcp_accept_swap(tcp_t *, tcp_t *, tcp_t *);
static int	tcp_conprim_opt_process(tcp_t *, mblk_t *, int *, int *, int *);
void
tcp_use_pure_tpi(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;

#ifdef	_ILP32
	tcp->tcp_acceptor_id = (t_uscalar_t)connp->conn_rq;
#else
	tcp->tcp_acceptor_id = connp->conn_dev;
#endif
	/*
	 * Insert this socket into the acceptor hash.
	 * We might need it for T_CONN_RES message
	 */
	tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);

	tcp->tcp_issocket = B_FALSE;
	TCP_STAT(tcp->tcp_tcps, tcp_sock_fallback);
}
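
/*
 * Illustrative note: under ILP32 a queue pointer fits in a t_uscalar_t,
 * so the read queue pointer itself can serve as the acceptor id; under
 * LP64 the minor device number is used instead, and it is resolved back
 * to a tcp_t via tcp_acceptor_hash_lookup() when a T_CONN_RES carries
 * the ACCEPTOR_id back down.
 */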
/* Shorthand to generate and send TPI error acks to our client */
void
tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error)
{
	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
		putnext(tcp->tcp_connp->conn_rq, mp);
}
/* Shorthand to generate and send TPI error acks to our client */
void
tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
    int t_error, int sys_error)
{
	struct T_error_ack	*teackp;

	if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
	    M_PCPROTO, T_ERROR_ACK)) != NULL) {
		teackp = (struct T_error_ack *)mp->b_rptr;
		teackp->ERROR_prim = primitive;
		teackp->TLI_error = t_error;
		teackp->UNIX_error = sys_error;
		putnext(tcp->tcp_connp->conn_rq, mp);
	}
}
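
/*
 * For reference, the T_error_ack built above carries four t_scalar_t
 * fields defined in <sys/tihdr.h>: PRIM_type (set to T_ERROR_ACK by
 * tpi_ack_alloc()), ERROR_prim (the primitive in error), TLI_error,
 * and UNIX_error (only meaningful when TLI_error is TSYSERR).
 */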
/*
 * TCP routine to get the values of options.
 */
int
tcp_tpi_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
{
	return (tcp_opt_get(Q_TO_CONN(q), level, name, ptr));
}
int
tcp_tpi_opt_set(queue_t *q, uint_t optset_context, int level, int name,
    uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
    void *thisdg_attrs, cred_t *cr)
{
	conn_t	*connp = Q_TO_CONN(q);

	return (tcp_opt_set(connp, optset_context, level, name, inlen, invalp,
	    outlenp, outvalp, thisdg_attrs, cr));
}
static int
tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp,
    int *t_errorp, int *sys_errorp)
{
	int error;
	int is_absreq_failure;
	t_scalar_t *opt_lenp;
	t_scalar_t opt_offset;
	int prim_type;
	struct T_conn_req *tcreqp;
	struct T_conn_res *tcresp;
	cred_t *cr;

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, NULL);
	ASSERT(cr != NULL);
	if (cr == NULL)
		return (-1);

	prim_type = ((union T_primitives *)mp->b_rptr)->type;
	ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES ||
	    prim_type == T_CONN_RES);

	switch (prim_type) {
	case T_CONN_REQ:
		tcreqp = (struct T_conn_req *)mp->b_rptr;
		opt_offset = tcreqp->OPT_offset;
		opt_lenp = (t_scalar_t *)&tcreqp->OPT_length;
		break;
	case O_T_CONN_RES:
	case T_CONN_RES:
		tcresp = (struct T_conn_res *)mp->b_rptr;
		opt_offset = tcresp->OPT_offset;
		opt_lenp = (t_scalar_t *)&tcresp->OPT_length;
		break;
	}

	*t_errorp = 0;
	*sys_errorp = 0;
	*do_disconnectp = 0;

	error = tpi_optcom_buf(tcp->tcp_connp->conn_wq, mp, opt_lenp,
	    opt_offset, cr, &tcp_opt_obj,
	    NULL, &is_absreq_failure);

	switch (error) {
	case 0:		/* no error */
		ASSERT(is_absreq_failure == 0);
		return (0);
	case ENOPROTOOPT:
		*t_errorp = TBADOPT;
		break;
	case EACCES:
		*t_errorp = TACCES;
		break;
	default:
		*t_errorp = TSYSERR; *sys_errorp = error;
		break;
	}
	if (is_absreq_failure != 0) {
		/*
		 * The connection request should get the local ack
		 * T_OK_ACK and then a T_DISCON_IND.
		 */
		*do_disconnectp = 1;
	}
	return (-1);
}
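
/*
 * Illustrative summary of tcp_conprim_opt_process() above: it returns 0
 * when option processing succeeds, and -1 on failure with *t_errorp and
 * *sys_errorp describing the TPI/errno pair to report; *do_disconnectp
 * is set when an absolute requirement failed, in which case the caller
 * acks the primitive with T_OK_ACK followed by a T_DISCON_IND.
 */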
static void
tcp_tpi_bind(tcp_t *tcp, mblk_t *mp)
{
	conn_t	*connp = tcp->tcp_connp;
	int	backlog;
	int	error = 0;
	int	len;
	sin_t	*sin;
	sin6_t	*sin6;
	struct sockaddr	*sa;
	mblk_t	*mp1;
	cred_t	*cr;
	struct T_bind_req *tbr;

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, NULL);
	ASSERT(cr != NULL);
	if (cr == NULL) {
		tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
		return;
	}

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_tpi_bind: bad req, len %u",
			    (uint_t)(mp->b_wptr - mp->b_rptr));
		}
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}
	/* Make sure the largest address fits */
	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t), 1);
	if (mp1 == NULL) {
		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
		return;
	}
	mp = mp1;
	tbr = (struct T_bind_req *)mp->b_rptr;

	backlog = tbr->CONIND_number;
	len = tbr->ADDR_length;
	switch (len) {
	case 0:		/* request for a generic port */
		tbr->ADDR_offset = sizeof (struct T_bind_req);
		if (connp->conn_family == AF_INET) {
			tbr->ADDR_length = sizeof (sin_t);
			sin = (sin_t *)&tbr[1];
			*sin = sin_null;
			sin->sin_family = AF_INET;
			sa = (struct sockaddr *)sin;
			len = sizeof (sin_t);
			mp->b_wptr = (uchar_t *)&sin[1];
		} else {
			ASSERT(connp->conn_family == AF_INET6);
			tbr->ADDR_length = sizeof (sin6_t);
			sin6 = (sin6_t *)&tbr[1];
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
			sa = (struct sockaddr *)sin6;
			len = sizeof (sin6_t);
			mp->b_wptr = (uchar_t *)&sin6[1];
		}
		break;

	case sizeof (sin_t):	/* Complete IPv4 address */
		sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin_t));
		break;

	case sizeof (sin6_t):	/* Complete IPv6 address */
		sa = (struct sockaddr *)mi_offset_param(mp,
		    tbr->ADDR_offset, sizeof (sin6_t));
		break;

	default:
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_tpi_bind: bad address length, %d",
			    tbr->ADDR_length);
		}
		tcp_err_ack(tcp, mp, TBADADDR, 0);
		return;
	}

	if (backlog > 0) {
		error = tcp_do_listen(connp, sa, len, backlog, DB_CRED(mp),
		    tbr->PRIM_type != O_T_BIND_REQ);
	} else {
		error = tcp_do_bind(connp, sa, len, DB_CRED(mp),
		    tbr->PRIM_type != O_T_BIND_REQ);
	}

	if (error > 0) {
		tcp_err_ack(tcp, mp, TSYSERR, error);
	} else if (error < 0) {
		tcp_err_ack(tcp, mp, -error, 0);
	} else {
		/*
		 * Update port information as sockfs/tpi needs it for checking
		 */
		if (connp->conn_family == AF_INET) {
			sin = (sin_t *)sa;
			sin->sin_port = connp->conn_lport;
		} else {
			sin6 = (sin6_t *)sa;
			sin6->sin6_port = connp->conn_lport;
		}
		mp->b_datap->db_type = M_PCPROTO;
		tbr->PRIM_type = T_BIND_ACK;
		putnext(connp->conn_rq, mp);
	}
}
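
/*
 * For reference, the T_BIND_REQ handled above sits in a single M_PROTO
 * mblk: a struct T_bind_req (PRIM_type, ADDR_length, ADDR_offset,
 * CONIND_number) followed by the address bytes at ADDR_offset. On
 * success the same mblk is rewritten in place into a T_BIND_ACK, with
 * the bound port filled into the appended sockaddr, before being sent
 * upstream.
 */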
/* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */
void
tcp_tpi_unbind(tcp_t *tcp, mblk_t *mp)
{
	conn_t *connp = tcp->tcp_connp;
	int error;

	error = tcp_do_unbind(connp);
	if (error > 0) {
		tcp_err_ack(tcp, mp, TSYSERR, error);
	} else if (error < 0) {
		tcp_err_ack(tcp, mp, -error, 0);
	} else {
		/* Send M_FLUSH according to TPI */
		(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);

		mp = mi_tpi_ok_ack_alloc(mp);
		if (mp != NULL)
			putnext(connp->conn_rq, mp);
	}
}
int
tcp_tpi_close(queue_t *q, int flags)
{
	conn_t		*connp;

	ASSERT(WR(q)->q_next == NULL);

	if (flags & SO_FALLBACK) {
		/*
		 * stream is being closed while in fallback
		 * simply free the resources that were allocated
		 */
		inet_minor_free(WR(q)->q_ptr, (dev_t)(RD(q)->q_ptr));
		qprocsoff(q);
		goto done;
	}

	connp = Q_TO_CONN(q);
	/*
	 * We are being closed as /dev/tcp or /dev/tcp6.
	 */
	tcp_close_common(connp, flags);

	qprocsoff(q);
	inet_minor_free(connp->conn_minor_arena, connp->conn_dev);

	/*
	 * Drop IP's reference on the conn. This is the last reference
	 * on the connp if the state was less than established. If the
	 * connection has gone into timewait state, then we will have
	 * one ref for the TCP and one more ref (total of two) for the
	 * classifier connected hash list (a timewait connection stays
	 * in connected hash till closed).
	 *
	 * We can't assert the references because there might be other
	 * transient reference places because of some walkers or queued
	 * packets in squeue for the timewait state.
	 */
	CONN_DEC_REF(connp);
done:
	q->q_ptr = WR(q)->q_ptr = NULL;
	return (0);
}
int
tcp_tpi_close_accept(queue_t *q)
{
	vmem_t	*minor_arena;
	dev_t	conn_dev;
	extern struct qinit tcp_acceptor_winit;

	ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit);

	/*
	 * We had opened an acceptor STREAM for sockfs which is
	 * now being closed due to some error.
	 */
	qprocsoff(q);

	minor_arena = (vmem_t *)WR(q)->q_ptr;
	conn_dev = (dev_t)RD(q)->q_ptr;
	ASSERT(minor_arena != NULL);
	ASSERT(conn_dev != 0);
	inet_minor_free(minor_arena, conn_dev);
	q->q_ptr = WR(q)->q_ptr = NULL;
	return (0);
}
/*
 * Put a connection confirmation message upstream built from the
 * address/flowid information with the conn and iph. Report our success or
 * failure.
 */
boolean_t
tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, mblk_t *idmp,
    mblk_t **defermp, ip_recv_attr_t *ira)
{
	sin_t	sin;
	sin6_t	sin6;
	mblk_t	*mp;
	char	*optp = NULL;
	int	optlen = 0;
	conn_t	*connp = tcp->tcp_connp;

	if (defermp != NULL)
		*defermp = NULL;

	if (tcp->tcp_conn.tcp_opts_conn_req != NULL) {
		/*
		 * Return in T_CONN_CON results of option negotiation through
		 * the T_CONN_REQ. Note: If there is a real end-to-end option
		 * negotiation, then what is received from remote end needs
		 * to be taken into account but there is no such thing (yet?)
		 * in our TCP/IP.
		 * Note: We do not use mi_offset_param() here as
		 * tcp_opts_conn_req contents do not directly come from
		 * an application and are either generated in kernel or
		 * from user input that was already verified.
		 */
		mp = tcp->tcp_conn.tcp_opts_conn_req;
		optp = (char *)(mp->b_rptr +
		    ((struct T_conn_req *)mp->b_rptr)->OPT_offset);
		optlen = (int)
		    ((struct T_conn_req *)mp->b_rptr)->OPT_length;
	}

	if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) {

		/* packet is IPv4 */
		if (connp->conn_family == AF_INET) {
			sin = sin_null;
			sin.sin_addr.s_addr = connp->conn_faddr_v4;
			sin.sin_port = connp->conn_fport;
			sin.sin_family = AF_INET;
			mp = mi_tpi_conn_con(NULL, (char *)&sin,
			    (int)sizeof (sin_t), optp, optlen);
		} else {
			sin6 = sin6_null;
			sin6.sin6_addr = connp->conn_faddr_v6;
			sin6.sin6_port = connp->conn_fport;
			sin6.sin6_family = AF_INET6;
			mp = mi_tpi_conn_con(NULL, (char *)&sin6,
			    (int)sizeof (sin6_t), optp, optlen);
		}
	} else {
		ip6_t	*ip6h = (ip6_t *)iphdr;

		ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION);
		ASSERT(connp->conn_family == AF_INET6);
		sin6 = sin6_null;
		sin6.sin6_addr = connp->conn_faddr_v6;
		sin6.sin6_port = connp->conn_fport;
		sin6.sin6_family = AF_INET6;
		sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
		mp = mi_tpi_conn_con(NULL, (char *)&sin6,
		    (int)sizeof (sin6_t), optp, optlen);
	}

	if (!mp)
		return (B_FALSE);

	mblk_copycred(mp, idmp);

	if (defermp == NULL) {
		conn_t *connp = tcp->tcp_connp;
		if (IPCL_IS_NONSTR(connp)) {
			(*connp->conn_upcalls->su_connected)
			    (connp->conn_upper_handle, tcp->tcp_connid,
			    ira->ira_cred, ira->ira_cpid);
			freemsg(mp);
		} else {
			if (ira->ira_cred != NULL) {
				/* So that getpeerucred works for TPI sockfs */
				mblk_setcred(mp, ira->ira_cred, ira->ira_cpid);
			}
			putnext(connp->conn_rq, mp);
		}
	} else {
		*defermp = mp;
	}

	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (B_TRUE);
}
/*
 * Successful connect request processing begins when our client passes
 * a T_CONN_REQ message into tcp_wput(), which performs function calls into
 * IP and then passes a T_OK_ACK (or T_ERROR_ACK) upstream.
 *
 * After various error checks are completed, tcp_tpi_connect() lays
 * the target address and port into the composite header template.
 * Then we ask IP for information, including a source address if we didn't
 * already have one. Finally we prepare to send the SYN packet, and then
 * send up the T_OK_ACK reply message.
 */
static void
tcp_tpi_connect(tcp_t *tcp, mblk_t *mp)
{
	sin_t		*sin;
	struct T_conn_req	*tcr;
	struct sockaddr	*sa;
	socklen_t	len;
	int		error;
	cred_t		*cr;
	pid_t		cpid;
	conn_t		*connp = tcp->tcp_connp;
	queue_t		*q = connp->conn_wq;

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, &cpid);
	ASSERT(cr != NULL);
	if (cr == NULL) {
		tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
		return;
	}

	tcr = (struct T_conn_req *)mp->b_rptr;

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}

	/*
	 * Pre-allocate the T_ordrel_ind mblk so that at close time, we
	 * will always have that to send up. Otherwise, we need to do
	 * special handling in case the allocation fails at that time.
	 * If the end point is TPI, the tcp_t can be reused and the
	 * tcp_ordrel_mp may be allocated already.
	 */
	if (tcp->tcp_ordrel_mp == NULL) {
		if ((tcp->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL) {
			tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
			return;
		}
	}

	/*
	 * Determine packet type based on type of address passed in
	 * the request should contain an IPv4 or IPv6 address.
	 * Make sure that address family matches the type of
	 * family of the address passed down.
	 */
	switch (tcr->DEST_length) {
	default:
		tcp_err_ack(tcp, mp, TBADADDR, 0);
		return;

	case (sizeof (sin_t) - sizeof (sin->sin_zero)): {
		/*
		 * XXX: The check for valid DEST_length was not there
		 * in earlier releases and some buggy
		 * TLI apps (e.g Sybase) got away with not feeding
		 * in sin_zero part of address.
		 * We allow that bug to keep those buggy apps humming.
		 * Test suites require the check on DEST_length.
		 * We construct a new mblk with valid DEST_length
		 * free the original so the rest of the code does
		 * not have to keep track of this special shorter
		 * length address case.
		 */
		mblk_t *nmp;
		struct T_conn_req *ntcr;
		sin_t *nsin;

		nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) +
		    tcr->OPT_length, BPRI_HI);
		if (nmp == NULL) {
			tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
			return;
		}
		ntcr = (struct T_conn_req *)nmp->b_rptr;
		bzero(ntcr, sizeof (struct T_conn_req)); /* zero fill */
		ntcr->PRIM_type = T_CONN_REQ;
		ntcr->DEST_length = sizeof (sin_t);
		ntcr->DEST_offset = sizeof (struct T_conn_req);

		nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset);
		*nsin = sin_null;
		/* Get pointer to shorter address to copy from original mp */
		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
		    tcr->DEST_length); /* extract DEST_length worth of sin_t */
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			freemsg(nmp);
			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
			return;
		}
		nsin->sin_family = sin->sin_family;
		nsin->sin_port = sin->sin_port;
		nsin->sin_addr = sin->sin_addr;
		/* Note:nsin->sin_zero zero-fill with sin_null assign above */
		nmp->b_wptr = (uchar_t *)&nsin[1];
		if (tcr->OPT_length != 0) {
			ntcr->OPT_length = tcr->OPT_length;
			ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr;
			bcopy((uchar_t *)tcr + tcr->OPT_offset,
			    (uchar_t *)ntcr + ntcr->OPT_offset,
			    tcr->OPT_length);
			nmp->b_wptr += tcr->OPT_length;
		}
		freemsg(mp);	/* original mp freed */
		mp = nmp;	/* re-initialize original variables */
		tcr = ntcr;
	}
	/* FALLTHRU */

	case sizeof (sin_t):
		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin_t));
		len = sizeof (sin_t);
		break;

	case sizeof (sin6_t):
		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin6_t));
		len = sizeof (sin6_t);
		break;
	}

	error = proto_verify_ip_addr(connp->conn_family, sa, len);
	if (error != 0) {
		tcp_err_ack(tcp, mp, TSYSERR, error);
		return;
	}

	/*
	 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we
	 * should key on their sequence number and cut them loose.
	 */

	/*
	 * If options passed in, feed it for verification and handling
	 */
	if (tcr->OPT_length != 0) {
		mblk_t	*ok_mp;
		mblk_t	*discon_mp;
		mblk_t	*conn_opts_mp;
		int	t_error, sys_error, do_disconnect;

		if (tcp_conprim_opt_process(tcp, mp,
		    &do_disconnect, &t_error, &sys_error) < 0) {
			if (do_disconnect) {
				ASSERT(t_error == 0 && sys_error == 0);
				discon_mp = mi_tpi_discon_ind(NULL,
				    ECONNREFUSED, 0);
				if (!discon_mp) {
					tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
					    TSYSERR, ENOMEM);
					return;
				}
				ok_mp = mi_tpi_ok_ack_alloc(mp);
				if (!ok_mp) {
					tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
					    TSYSERR, ENOMEM);
					return;
				}
				qreply(q, ok_mp);
				qreply(q, discon_mp); /* no flush! */
			} else {
				ASSERT(t_error != 0);
				tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error,
				    sys_error);
			}
			return;
		}
		/*
		 * Success in setting options, the mp option buffer represented
		 * by OPT_length/offset has been potentially modified and
		 * contains results of option processing. We copy it in
		 * another mp to save it for potentially influencing returning
		 * it in T_CONN_CON.
		 */
		if (tcr->OPT_length != 0) { /* there are resulting options */
			conn_opts_mp = copyb(mp);
			if (!conn_opts_mp) {
				tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
				    TSYSERR, ENOMEM);
				return;
			}
			ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL);
			tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp;
			/*
			 * Note:
			 * These resulting option negotiation can include any
			 * end-to-end negotiation options but there is no such
			 * thing (yet?) in our TCP/IP.
			 */
		}
	}

	/* call the non-TPI version */
	error = tcp_do_connect(tcp->tcp_connp, sa, len, cr, cpid);
	if (error < 0) {
		mp = mi_tpi_err_ack_alloc(mp, -error, 0);
	} else if (error > 0) {
		mp = mi_tpi_err_ack_alloc(mp, TSYSERR, error);
	} else {
		mp = mi_tpi_ok_ack_alloc(mp);
	}

	/*
	 * Note: Code below is the "failure" case
	 */
	/* return error ack and blow away saved option results if any */
	if (mp != NULL) {
		putnext(connp->conn_rq, mp);
	} else {
		tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
		    TSYSERR, ENOMEM);
	}
}
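
/*
 * Illustrative TPI sequence for the routine above: a TLI client's
 * t_connect(3NSL) arrives here as a T_CONN_REQ; the immediate local
 * reply is a T_OK_ACK (or T_ERROR_ACK), and the T_CONN_CON confirming
 * completion of the three-way handshake is sent up later by
 * tcp_conn_con().
 */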
/* Return the TPI/TLI equivalent of our current tcp_state */
static int
tcp_tpistate(tcp_t *tcp)
{
	switch (tcp->tcp_state) {
	case TCPS_IDLE:
		return (TS_UNBND);
	case TCPS_LISTEN:
		/*
		 * Return whether there are outstanding T_CONN_IND waiting
		 * for the matching T_CONN_RES. Therefore don't count q0.
		 */
		if (tcp->tcp_conn_req_cnt_q > 0)
			return (TS_WRES_CIND);
		else
			return (TS_IDLE);
	case TCPS_BOUND:
		return (TS_IDLE);
	case TCPS_SYN_SENT:
		return (TS_WCON_CREQ);
	case TCPS_SYN_RCVD:
		/*
		 * Note: assumption: this has to be the active open SYN_RCVD.
		 * The passive instance is detached in SYN_RCVD stage of
		 * incoming connection processing so we cannot get request
		 * for T_info_ack on it.
		 */
		return (TS_WACK_CRES);
	case TCPS_ESTABLISHED:
		return (TS_DATA_XFER);
	case TCPS_CLOSE_WAIT:
		return (TS_WREQ_ORDREL);
	case TCPS_FIN_WAIT_1:
		return (TS_WIND_ORDREL);
	case TCPS_FIN_WAIT_2:
		return (TS_WIND_ORDREL);
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
	case TCPS_CLOSED:
		/*
		 * Following TS_WACK_DREQ7 is a rendition of "not
		 * yet TS_IDLE" TPI state. There is no best match to any
		 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT} but we
		 * choose a value that will map to the TLI/XTI level
		 * state of TSTATECHNG (state is in process of changing)
		 * which captures what this dummy state represents.
		 */
		return (TS_WACK_DREQ7);
	default:
		cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s",
		    tcp->tcp_state, tcp_display(tcp, NULL,
		    DISP_PORT_ONLY));
		return (TS_UNBND);
	}
}
static void
tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	extern struct T_info_ack tcp_g_t_info_ack;
	extern struct T_info_ack tcp_g_t_info_ack_v6;

	if (connp->conn_family == AF_INET6)
		*tia = tcp_g_t_info_ack_v6;
	else
		*tia = tcp_g_t_info_ack;
	tia->CURRENT_state = tcp_tpistate(tcp);
	tia->OPT_size = tcp_max_optsize;
	if (tcp->tcp_mss == 0) {
		/* Not yet set - tcp_open does not set mss */
		if (connp->conn_ipversion == IPV4_VERSION)
			tia->TIDU_size = tcps->tcps_mss_def_ipv4;
		else
			tia->TIDU_size = tcps->tcps_mss_def_ipv6;
	} else {
		tia->TIDU_size = tcp->tcp_mss;
	}
	/* TODO: Default ETSDU is 1. Is that correct for tcp? */
}
void
tcp_do_capability_ack(tcp_t *tcp, struct T_capability_ack *tcap,
    t_uscalar_t cap_bits1)
{
	tcap->CAP_bits1 = 0;

	if (cap_bits1 & TC1_INFO) {
		tcp_copy_info(&tcap->INFO_ack, tcp);
		tcap->CAP_bits1 |= TC1_INFO;
	}

	if (cap_bits1 & TC1_ACCEPTOR_ID) {
		tcap->ACCEPTOR_id = tcp->tcp_acceptor_id;
		tcap->CAP_bits1 |= TC1_ACCEPTOR_ID;
	}
}
/*
 * This routine responds to T_CAPABILITY_REQ messages. It is called by
 * tcp_wput. Much of the T_CAPABILITY_ACK information is copied from
 * tcp_g_t_info_ack. The current state of the stream is copied from
 * tcp_state.
 */
void
tcp_capability_req(tcp_t *tcp, mblk_t *mp)
{
	t_uscalar_t		cap_bits1;
	struct T_capability_ack	*tcap;

	if (MBLKL(mp) < sizeof (struct T_capability_req)) {
		freemsg(mp);
		return;
	}

	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;

	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
	    mp->b_datap->db_type, T_CAPABILITY_ACK);
	if (mp == NULL)
		return;

	tcap = (struct T_capability_ack *)mp->b_rptr;
	tcp_do_capability_ack(tcp, tcap, cap_bits1);

	putnext(tcp->tcp_connp->conn_rq, mp);
}
/*
 * This routine responds to T_INFO_REQ messages. It is called by tcp_wput.
 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack.
 * The current state of the stream is copied from tcp_state.
 */
void
tcp_info_req(tcp_t *tcp, mblk_t *mp)
{
	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
	    T_INFO_ACK);
	if (mp == NULL) {
		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
		return;
	}
	tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp);
	putnext(tcp->tcp_connp->conn_rq, mp);
}
/* Respond to the TPI addr request */
void
tcp_addr_req(tcp_t *tcp, mblk_t *mp)
{
	struct sockaddr *sa;
	mblk_t	*ackmp;
	struct T_addr_ack *taa;
	conn_t	*connp = tcp->tcp_connp;
	uint_t	addrlen;

	/* Make it large enough for worst case */
	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
	    2 * sizeof (sin6_t), 1);
	if (ackmp == NULL) {
		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
		return;
	}

	taa = (struct T_addr_ack *)ackmp->b_rptr;

	bzero(taa, sizeof (struct T_addr_ack));
	ackmp->b_wptr = (uchar_t *)&taa[1];

	taa->PRIM_type = T_ADDR_ACK;
	ackmp->b_datap->db_type = M_PCPROTO;

	if (connp->conn_family == AF_INET)
		addrlen = sizeof (sin_t);
	else
		addrlen = sizeof (sin6_t);

	/*
	 * Note: Following code assumes 32 bit alignment of basic
	 * data structures like sin_t and struct T_addr_ack.
	 */
	if (tcp->tcp_state >= TCPS_BOUND) {
		/*
		 * Fill in local address first
		 */
		taa->LOCADDR_offset = sizeof (*taa);
		taa->LOCADDR_length = addrlen;
		sa = (struct sockaddr *)&taa[1];
		(void) conn_getsockname(connp, sa, &addrlen);
		ackmp->b_wptr += addrlen;
	}
	if (tcp->tcp_state >= TCPS_SYN_RCVD) {
		/*
		 * Fill in Remote address
		 */
		taa->REMADDR_length = addrlen;
		/* assumed 32-bit alignment */
		taa->REMADDR_offset = taa->LOCADDR_offset + taa->LOCADDR_length;
		sa = (struct sockaddr *)(ackmp->b_rptr + taa->REMADDR_offset);
		(void) conn_getpeername(connp, sa, &addrlen);
		ackmp->b_wptr += addrlen;
	}
	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
	putnext(tcp->tcp_connp->conn_rq, ackmp);
}
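
/*
 * For reference, the resulting T_ADDR_ACK mblk is laid out as a
 * struct T_addr_ack immediately followed by the local sockaddr at
 * LOCADDR_offset and, for endpoints past TCPS_SYN_RCVD, the remote
 * sockaddr at REMADDR_offset = LOCADDR_offset + LOCADDR_length.
 */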
/*
 * Swap information between the eager and acceptor for a TLI/XTI client.
 * The sockfs accept is done on the acceptor stream and control goes
 * through tcp_tli_accept() and tcp_accept()/tcp_accept_swap() is not
 * called. In either case, both the eager and listener are in their own
 * perimeter (squeue) and the code has to deal with potential race.
 *
 * See the block comment on top of tcp_accept() and tcp_tli_accept().
 */
static void
tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager)
{
	conn_t	*econnp, *aconnp;

	ASSERT(eager->tcp_connp->conn_rq == listener->tcp_connp->conn_rq);
	ASSERT(eager->tcp_detached && !acceptor->tcp_detached);
	ASSERT(!TCP_IS_SOCKET(acceptor));
	ASSERT(!TCP_IS_SOCKET(eager));
	ASSERT(!TCP_IS_SOCKET(listener));

	acceptor->tcp_detached = B_TRUE;
	/*
	 * To permit stream re-use by TLI/XTI, the eager needs a copy of
	 * the acceptor id.
	 */
	eager->tcp_acceptor_id = acceptor->tcp_acceptor_id;

	/* remove eager from listen list... */
	mutex_enter(&listener->tcp_eager_lock);
	tcp_eager_unlink(eager);
	ASSERT(eager->tcp_eager_next_q == NULL &&
	    eager->tcp_eager_last_q == NULL);
	ASSERT(eager->tcp_eager_next_q0 == NULL &&
	    eager->tcp_eager_prev_q0 == NULL);
	mutex_exit(&listener->tcp_eager_lock);

	econnp = eager->tcp_connp;
	aconnp = acceptor->tcp_connp;
	econnp->conn_rq = aconnp->conn_rq;
	econnp->conn_wq = aconnp->conn_wq;
	econnp->conn_rq->q_ptr = econnp;
	econnp->conn_wq->q_ptr = econnp;

	/*
	 * In the TLI/XTI loopback case, we are inside the listener's squeue,
	 * which might be a different squeue from our peer TCP instance.
	 * For TCP Fusion, the peer expects that whenever tcp_detached is
	 * clear, our TCP queues point to the acceptor's queues. Thus, use
	 * membar_producer() to ensure that the assignments of conn_rq/conn_wq
	 * above reach global visibility prior to the clearing of tcp_detached.
	 */
	membar_producer();
	eager->tcp_detached = B_FALSE;

	ASSERT(eager->tcp_ack_tid == 0);

	econnp->conn_dev = aconnp->conn_dev;
	econnp->conn_minor_arena = aconnp->conn_minor_arena;

	ASSERT(econnp->conn_minor_arena != NULL);
	if (econnp->conn_cred != NULL)
		crfree(econnp->conn_cred);
	econnp->conn_cred = aconnp->conn_cred;
	ASSERT(!(econnp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	econnp->conn_ixa->ixa_cred = econnp->conn_cred;
	aconnp->conn_cred = NULL;
	econnp->conn_cpid = aconnp->conn_cpid;
	ASSERT(econnp->conn_netstack == aconnp->conn_netstack);
	ASSERT(eager->tcp_tcps == acceptor->tcp_tcps);

	econnp->conn_zoneid = aconnp->conn_zoneid;
	econnp->conn_allzones = aconnp->conn_allzones;
	econnp->conn_ixa->ixa_zoneid = aconnp->conn_ixa->ixa_zoneid;

	econnp->conn_zone_is_global = aconnp->conn_zone_is_global;

	/* Do the IPC initialization */
	CONN_INC_REF(econnp);

	/* Done with old IPC. Drop its ref on its connp */
	CONN_DEC_REF(aconnp);
}
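
/*
 * Illustrative note on the reference handling above: the eager takes
 * over the acceptor stream's queues, device and credentials, so
 * CONN_INC_REF(econnp) gives the eager's conn the reference the stream
 * instance now holds on it, while CONN_DEC_REF(aconnp) drops the
 * reference that stream previously held on the acceptor's conn.
 */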
/*
 * This runs at the tail end of accept processing on the squeue of the
 * new connection.
 */
/* ARGSUSED */
static void
tcp_accept_finish(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t			*connp = (conn_t *)arg;
	tcp_t			*tcp = connp->conn_tcp;
	queue_t			*q = connp->conn_rq;
	tcp_stack_t		*tcps = tcp->tcp_tcps;
	struct stroptions	*stropt;
	struct sock_proto_props sopp;

	/* Should never be called for non-STREAMS sockets */
	ASSERT(!IPCL_IS_NONSTR(connp));

	/* We should just receive a single mblk that fits a T_discon_ind */
	ASSERT(mp->b_cont == NULL);

	/*
	 * Drop the eager's ref on the listener, that was placed when
	 * this eager began life in tcp_input_listener.
	 */
	CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);

	tcp->tcp_detached = B_FALSE;

	if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) {
		/*
		 * Someone blew off the eager before we could finish
		 * the accept.
		 *
		 * The only reason eager exists is because we put in
		 * a ref on it when conn ind went up. We need to send
		 * a disconnect indication up while the last reference
		 * on the eager will be dropped by the squeue when we
		 * return.
		 */
		ASSERT(tcp->tcp_listener == NULL);
		if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) {
			struct T_discon_ind	*tdi;

			(void) putnextctl1(q, M_FLUSH, FLUSHRW);
			/*
			 * Let us reuse the incoming mblk to avoid
			 * memory allocation failure problems. We know
			 * that the size of the incoming mblk i.e.
			 * stroptions is greater than sizeof
			 * T_discon_ind.
			 */
			ASSERT(DB_REF(mp) == 1);
			ASSERT(MBLKSIZE(mp) >=
			    sizeof (struct T_discon_ind));

			DB_TYPE(mp) = M_PROTO;
			((union T_primitives *)mp->b_rptr)->type =
			    T_DISCON_IND;
			tdi = (struct T_discon_ind *)mp->b_rptr;
			if (tcp->tcp_issocket) {
				tdi->DISCON_reason = ECONNREFUSED;
				tdi->SEQ_number = 0;
			} else {
				tdi->DISCON_reason = ENOPROTOOPT;
				tdi->SEQ_number =
				    tcp->tcp_conn_req_seqnum;
			}
			mp->b_wptr = mp->b_rptr +
			    sizeof (struct T_discon_ind);
			putnext(q, mp);
		}
		tcp->tcp_hard_binding = B_FALSE;
		return;
	}

	/*
	 * This is the first time we run on the correct
	 * queue after tcp_accept. So fix all the q parameters
	 * there.
	 *
	 * Let us reuse the incoming mblk to avoid
	 * memory allocation failure problems. We know
	 * that the size of the incoming mblk is at least
	 * sizeof struct stroptions.
	 */
	tcp_get_proto_props(tcp, &sopp);

	ASSERT(DB_REF(mp) == 1);
	ASSERT(MBLKSIZE(mp) >= sizeof (struct stroptions));

	DB_TYPE(mp) = M_SETOPTS;
	mp->b_wptr = mp->b_rptr + sizeof (struct stroptions);
	stropt = (struct stroptions *)mp->b_rptr;
	ASSERT(sopp.sopp_flags & (SO_HIWAT|SO_WROFF|SO_MAXBLK));
	stropt->so_flags = SO_HIWAT | SO_WROFF | SO_MAXBLK;
	stropt->so_hiwat = sopp.sopp_rxhiwat;
	stropt->so_wroff = sopp.sopp_wroff;
	stropt->so_maxblk = sopp.sopp_maxblk;

	/* Send the options up */
	putnext(q, mp);

	/*
	 * Pass up any data and/or a fin that has been received.
	 *
	 * Adjust receive window in case it had decreased
	 * (because there is data <=> tcp_rcv_list != NULL)
	 * while the connection was detached. Note that
	 * in case the eager was flow-controlled, w/o this
	 * code, the rwnd may never open up again!
	 */
	if (tcp->tcp_rcv_list != NULL) {
		/* We drain directly in case of fused tcp loopback */

		if (!tcp->tcp_fused && canputnext(q)) {
			tcp->tcp_rwnd = connp->conn_rcvbuf;
			if (tcp->tcp_state >= TCPS_ESTABLISHED &&
			    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
				tcp_xmit_ctl(NULL,
				    tcp, (tcp->tcp_swnd == 0) ?
				    tcp->tcp_suna : tcp->tcp_snxt,
				    tcp->tcp_rnxt, TH_ACK);
			}
		}

		(void) tcp_rcv_drain(tcp);

		/*
		 * For fused tcp loopback, back-enable peer endpoint
		 * if it's currently flow-controlled.
		 */
		if (tcp->tcp_fused) {
			tcp_t *peer_tcp = tcp->tcp_loopback_peer;

			ASSERT(peer_tcp != NULL);
			ASSERT(peer_tcp->tcp_fused);

			mutex_enter(&peer_tcp->tcp_non_sq_lock);
			if (peer_tcp->tcp_flow_stopped) {
				tcp_clrqfull(peer_tcp);
				TCP_STAT(tcps, tcp_fusion_backenabled);
			}
			mutex_exit(&peer_tcp->tcp_non_sq_lock);
		}
	}
	ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
	if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
		tcp->tcp_ordrel_done = B_TRUE;
		mp = tcp->tcp_ordrel_mp;
		tcp->tcp_ordrel_mp = NULL;
		putnext(q, mp);
	}
	tcp->tcp_hard_binding = B_FALSE;

	if (connp->conn_keepalive) {
		tcp->tcp_ka_last_intrvl = 0;
		tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_timer,
		    tcp->tcp_ka_interval);
	}

	/*
	 * At this point, eager is fully established and will
	 * have the following references -
	 *
	 * 2 references for connection to exist (1 for TCP and 1 for IP).
	 * 1 reference for the squeue which will be dropped by the squeue as
	 * soon as this function returns.
	 * There will be 1 additional reference for being in classifier
	 * hash list provided something bad hasn't happened.
	 */
	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
}
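
/*
 * A note on the mblk reuse above: the single mblk pre-allocated by the
 * accept path is sized MAX(sizeof (struct T_discon_ind),
 * sizeof (struct stroptions)), so the same buffer can be recycled
 * either into a T_DISCON_IND on failure or into the M_SETOPTS carrying
 * the stream head parameters on success, avoiding an allocation that
 * could fail at this late stage.
 */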
/*
 * Pull a deferred connection indication off of the listener. The caller
 * must verify that there is a deferred conn ind under eager_lock before
 * calling this function.
 */
static mblk_t *
tcp_get_def_conn_ind(tcp_t *listener)
{
	tcp_t	*tcp;
	tcp_t	*tail;
	mblk_t	*conn_ind;

	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
	ASSERT(listener->tcp_eager_prev_q0->tcp_conn_def_q0);

	tcp = listener->tcp_eager_prev_q0;
	/*
	 * listener->tcp_eager_prev_q0 points to the TAIL of the
	 * deferred T_conn_ind queue. We need to get to the head
	 * of the queue in order to send up T_conn_ind the same
	 * order as how the 3WHS is completed.
	 */
	while (tcp != listener) {
		if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0)
			break;
		else
			tcp = tcp->tcp_eager_prev_q0;
	}

	conn_ind = tcp->tcp_conn.tcp_eager_conn_ind;
	tcp->tcp_conn.tcp_eager_conn_ind = NULL;
	/* Move from q0 to q */
	ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
	listener->tcp_conn_req_cnt_q0--;
	listener->tcp_conn_req_cnt_q++;
	tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
	    tcp->tcp_eager_prev_q0;
	tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
	    tcp->tcp_eager_next_q0;
	tcp->tcp_eager_prev_q0 = NULL;
	tcp->tcp_eager_next_q0 = NULL;
	tcp->tcp_conn_def_q0 = B_FALSE;

	/* Make sure the tcp isn't in the list of droppables */
	ASSERT(tcp->tcp_eager_next_drop_q0 == NULL &&
	    tcp->tcp_eager_prev_drop_q0 == NULL);

	/*
	 * Insert at end of the queue because sockfs sends
	 * down T_CONN_RES in chronological order. Leaving
	 * the older conn indications at front of the queue
	 * helps reducing search time.
	 */
	tail = listener->tcp_eager_last_q;
	if (tail != NULL) {
		tail->tcp_eager_next_q = tcp;
	} else {
		listener->tcp_eager_next_q = tcp;
	}
	listener->tcp_eager_last_q = tcp;
	tcp->tcp_eager_next_q = NULL;

	return (conn_ind);
}
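
/*
 * Illustrative caller pattern (a sketch; both callers below follow it):
 *
 *	mutex_enter(&listener->tcp_eager_lock);
 *	if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
 *		mblk_t *conn_ind = tcp_get_def_conn_ind(listener);
 *		mutex_exit(&listener->tcp_eager_lock);
 *		putnext(listener->tcp_connp->conn_rq, conn_ind);
 *	} else {
 *		mutex_exit(&listener->tcp_eager_lock);
 *	}
 */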
/*
 * Reply to a client's T_CONN_RES TPI message. This function
 * is used only for TLI/XTI listeners. Sockfs sends T_CONN_RES
 * on the acceptor STREAM and it is processed in tcp_accept_common().
 * Read the block comment on top of tcp_input_listener().
 */
void
tcp_tli_accept(tcp_t *listener, mblk_t *mp)
{
	tcp_t	*acceptor;
	tcp_t	*eager;
	struct T_conn_res	*tcr;
	t_uscalar_t	acceptor_id;
	t_scalar_t	seqnum;
	mblk_t	*discon_mp = NULL;
	mblk_t	*ok_mp;
	mblk_t	*mp1;
	tcp_stack_t	*tcps = listener->tcp_tcps;
	conn_t	*econnp;

	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
		tcp_err_ack(listener, mp, TPROTO, 0);
		return;
	}
	tcr = (struct T_conn_res *)mp->b_rptr;

	/*
	 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the
	 * read side queue of the streams device underneath us i.e. the
	 * read side queue of 'ip'. Since we can't dereference QUEUE_ptr we
	 * look it up in the queue_hash. Under LP64 it sends down the
	 * minor_t of the accepting endpoint.
	 *
	 * Once the acceptor/eager are modified (in tcp_accept_swap) the
	 * fanout hash lock is held.
	 * This prevents any thread from entering the acceptor queue from
	 * below (since it has not been hard bound yet i.e. any inbound
	 * packets will arrive on the listener conn_t and
	 * go through the classifier).
	 * The CONN_INC_REF will prevent the acceptor from closing.
	 *
	 * XXX It is still possible for a tli application to send down data
	 * on the accepting stream while another thread calls t_accept.
	 * This should not be a problem for well-behaved applications since
	 * the T_OK_ACK is sent after the queue swapping is completed.
	 *
	 * If the accepting fd is the same as the listening fd, avoid
	 * queue hash lookup since that will return an eager listener in an
	 * already established state.
	 */
	acceptor_id = tcr->ACCEPTOR_id;
	mutex_enter(&listener->tcp_eager_lock);
	if (listener->tcp_acceptor_id == acceptor_id) {
		eager = listener->tcp_eager_next_q;
		/* only count how many T_CONN_INDs so don't count q0 */
		if ((listener->tcp_conn_req_cnt_q != 1) ||
		    (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) {
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TBADF, 0);
			return;
		}
		if (listener->tcp_conn_req_cnt_q0 != 0) {
			/* Throw away all the eagers on q0. */
			tcp_eager_cleanup(listener, 1);
		}
		if (listener->tcp_syn_defense) {
			listener->tcp_syn_defense = B_FALSE;
			if (listener->tcp_ip_addr_cache != NULL) {
				kmem_free(listener->tcp_ip_addr_cache,
				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
				listener->tcp_ip_addr_cache = NULL;
			}
		}
		/*
		 * Transfer tcp_conn_req_max to the eager so that when
		 * a disconnect occurs we can revert the endpoint to the
		 * listen state.
		 */
		eager->tcp_conn_req_max = listener->tcp_conn_req_max;
		ASSERT(listener->tcp_conn_req_cnt_q0 == 0);
		/*
		 * Get a reference on the acceptor just like the
		 * tcp_acceptor_hash_lookup below.
		 */
		acceptor = listener;
		CONN_INC_REF(acceptor->tcp_connp);
	} else {
		acceptor = tcp_acceptor_hash_lookup(acceptor_id, tcps);
		if (acceptor == NULL) {
			if (listener->tcp_connp->conn_debug) {
				(void) strlog(TCP_MOD_ID, 0, 1,
				    SL_ERROR|SL_TRACE,
				    "tcp_accept: did not find acceptor 0x%x\n",
				    acceptor_id);
			}
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TPROVMISMATCH, 0);
			return;
		}
		/*
		 * Verify acceptor state. The acceptable states for an acceptor
		 * include TCPS_IDLE and TCPS_BOUND.
		 */
		switch (acceptor->tcp_state) {
		case TCPS_IDLE:
			/* FALLTHRU */
		case TCPS_BOUND:
			break;
		default:
			CONN_DEC_REF(acceptor->tcp_connp);
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TOUTSTATE, 0);
			return;
		}
	}

	/* The listener must be in TCPS_LISTEN */
	if (listener->tcp_state != TCPS_LISTEN) {
		CONN_DEC_REF(acceptor->tcp_connp);
		mutex_exit(&listener->tcp_eager_lock);
		tcp_err_ack(listener, mp, TOUTSTATE, 0);
		return;
	}

	/*
	 * Rendezvous with an eager connection request packet hanging off
	 * 'tcp' that has the 'seqnum' tag. We tagged the detached open
	 * tcp structure when the connection packet arrived in
	 * tcp_input_listener().
	 */
	seqnum = tcr->SEQ_number;
	eager = listener;
	do {
		eager = eager->tcp_eager_next_q;
		if (eager == NULL) {
			CONN_DEC_REF(acceptor->tcp_connp);
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TBADSEQ, 0);
			return;
		}
	} while (eager->tcp_conn_req_seqnum != seqnum);
	mutex_exit(&listener->tcp_eager_lock);

	/*
	 * At this point, both acceptor and listener have 2 ref
	 * that they begin with. Acceptor has one additional ref
	 * we placed in lookup while listener has 3 additional
	 * ref for being behind the squeue (tcp_accept() is
	 * done on listener's squeue); being in classifier hash;
	 * and eager's ref on listener.
	 */
	ASSERT(listener->tcp_connp->conn_ref >= 5);
	ASSERT(acceptor->tcp_connp->conn_ref >= 3);

	/*
	 * The eager at this point is set in its own squeue and
	 * could easily have been killed (tcp_accept_finish will
	 * deal with that) because of a TH_RST so we can only
	 * ASSERT for a single ref.
	 */
	ASSERT(eager->tcp_connp->conn_ref >= 1);

	/*
	 * Pre allocate the discon_ind mblk also. tcp_accept_finish will
	 * use it if something failed.
	 */
	discon_mp = allocb(MAX(sizeof (struct T_discon_ind),
	    sizeof (struct stroptions)), BPRI_HI);
	if (discon_mp == NULL) {
		CONN_DEC_REF(acceptor->tcp_connp);
		CONN_DEC_REF(eager->tcp_connp);
		tcp_err_ack(listener, mp, TSYSERR, ENOMEM);
		return;
	}

	econnp = eager->tcp_connp;

	/* Hold a copy of mp, in case reallocb fails */
	if ((mp1 = copymsg(mp)) == NULL) {
		CONN_DEC_REF(acceptor->tcp_connp);
		CONN_DEC_REF(eager->tcp_connp);
		freemsg(discon_mp);
		tcp_err_ack(listener, mp, TSYSERR, ENOMEM);
		return;
	}

	tcr = (struct T_conn_res *)mp1->b_rptr;

	/*
	 * This is an expanded version of mi_tpi_ok_ack_alloc()
	 * which allocates a larger mblk and appends the new
	 * local address to the ok_ack. The address is copied by
	 * soaccept() for getsockname().
	 */
	{
		int extra;

		extra = (econnp->conn_family == AF_INET) ?
		    sizeof (sin_t) : sizeof (sin6_t);

		/*
		 * Try to re-use mp, if possible. Otherwise, allocate
		 * an mblk and return it as ok_mp. In any case, mp
		 * is no longer usable upon return.
		 */
		if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) {
			CONN_DEC_REF(acceptor->tcp_connp);
			CONN_DEC_REF(eager->tcp_connp);
			freemsg(discon_mp);
			/* Original mp has been freed by now, so use mp1 */
			tcp_err_ack(listener, mp1, TSYSERR, ENOMEM);
			return;
		}

		mp = NULL;	/* We should never use mp after this point */

		switch (extra) {
		case sizeof (sin_t): {
			sin_t *sin = (sin_t *)ok_mp->b_wptr;

			ok_mp->b_wptr += extra;
			sin->sin_family = AF_INET;
			sin->sin_port = econnp->conn_lport;
			sin->sin_addr.s_addr = econnp->conn_laddr_v4;
			break;
		}
		case sizeof (sin6_t): {
			sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr;

			ok_mp->b_wptr += extra;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = econnp->conn_lport;
			sin6->sin6_addr = econnp->conn_laddr_v6;
			sin6->sin6_flowinfo = econnp->conn_flowinfo;
			if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6) &&
			    (econnp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) {
				sin6->sin6_scope_id =
				    econnp->conn_ixa->ixa_scopeid;
			} else {
				sin6->sin6_scope_id = 0;
			}
			sin6->__sin6_src_id = 0;
			break;
		}
		default:
			break;
		}
		ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim);
	}

	/*
	 * If there are no options we know that the T_CONN_RES will
	 * succeed. However, we can't send the T_OK_ACK upstream until
	 * the tcp_accept_swap is done since it would be dangerous to
	 * let the application start using the new fd prior to the swap.
	 */
	tcp_accept_swap(listener, acceptor, eager);

	/*
	 * tcp_accept_swap unlinks eager from listener but does not drop
	 * the eager's reference on the listener.
	 */
	ASSERT(eager->tcp_listener == NULL);
	ASSERT(listener->tcp_connp->conn_ref >= 5);

	/*
	 * The eager is now associated with its own queue. Insert in
	 * the hash so that the connection can be reused for a future
	 * T_CONN_RES.
	 */
	tcp_acceptor_hash_insert(acceptor_id, eager);

	/*
	 * We now do the processing of options with T_CONN_RES.
	 * We delay till now since we wanted to have queue to pass to
	 * option processing routines that points back to the right
	 * instance structure which does not happen until after
	 * tcp_accept_swap().
	 *
	 * Note:
	 * The sanity of the logic here assumes that whatever options
	 * are appropriate to inherit from listener=>eager are done
	 * before this point, and whatever were to be overridden (or not)
	 * in transfer logic from eager=>acceptor in tcp_accept_swap().
	 * [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it
	 * before its ACCEPTOR_id comes down in T_CONN_RES ]
	 * This may not be true at this point in time but can be fixed
	 * independently. This option processing code starts with
	 * the instantiated acceptor instance and the final queue at
	 * this point.
	 */

	if (tcr->OPT_length != 0) {
		/* Options to process */
		int t_error = 0;
		int sys_error = 0;
		int do_disconnect = 0;

		if (tcp_conprim_opt_process(eager, mp1,
		    &do_disconnect, &t_error, &sys_error) < 0) {
			eager->tcp_accept_error = 1;
			if (do_disconnect) {
				/*
				 * An option failed which does not allow
				 * connection to be accepted.
				 *
				 * We allow T_CONN_RES to succeed and
				 * put a T_DISCON_IND on the eager queue.
				 */
				ASSERT(t_error == 0 && sys_error == 0);
				eager->tcp_send_discon_ind = 1;
			} else {
				ASSERT(t_error != 0);
				freemsg(ok_mp);
				/*
				 * Original mp was either freed or set
				 * to ok_mp above, so use mp1 instead.
				 */
				tcp_err_ack(listener, mp1, t_error, sys_error);
				goto finish;
			}
		}
		/*
		 * Most likely success in setting options (except if
		 * eager->tcp_send_discon_ind set).
		 * mp1 option buffer represented by OPT_length/offset
		 * potentially modified and contains results of setting
		 * options at this point
		 */
	}

	/* We no longer need mp1, since all options processing has passed */
	freemsg(mp1);

	putnext(listener->tcp_connp->conn_rq, ok_mp);

	mutex_enter(&listener->tcp_eager_lock);
	if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
		mblk_t	*conn_ind;

		/*
		 * This path should not be executed if listener and
		 * acceptor streams are the same.
		 */
		ASSERT(listener != acceptor);
		conn_ind = tcp_get_def_conn_ind(listener);
		mutex_exit(&listener->tcp_eager_lock);
		putnext(listener->tcp_connp->conn_rq, conn_ind);
	} else {
		mutex_exit(&listener->tcp_eager_lock);
	}

	/*
	 * Done with the acceptor - free it
	 *
	 * Note: from this point on, no access to listener should be made
	 * as listener can be equal to acceptor.
	 */
finish:
	ASSERT(acceptor->tcp_detached);
	acceptor->tcp_connp->conn_rq = NULL;
	ASSERT(!IPCL_IS_NONSTR(acceptor->tcp_connp));
	acceptor->tcp_connp->conn_wq = NULL;
	(void) tcp_clean_death(acceptor, 0);
	CONN_DEC_REF(acceptor->tcp_connp);

	/*
	 * We pass discon_mp to tcp_accept_finish to get on the right squeue.
	 *
	 * It will update the setting for sockfs/stream head and also take
	 * care of any data that arrived before accept() was called.
	 * In case we already received a FIN then tcp_accept_finish will send
	 * up the ordrel. It will also send up a window update if the window
	 * has opened up.
	 *
	 * XXX: we currently have a problem if XTI application closes the
	 * acceptor stream in between. This problem exists in on10-gate also
	 * and is well known but nothing can be done short of major rewrite
	 * to fix it. Now it is possible to take care of it by assigning TLI/XTI
	 * eager same squeue as listener (we can distinguish non socket
	 * listeners at the time of handling a SYN in tcp_input_listener)
	 * and do most of the work that tcp_accept_finish does here itself
	 * and then get behind the acceptor squeue to access the acceptor
	 * queue.
	 */
	/*
	 * We already have a ref on tcp so no need to do one before squeue_enter
	 */
	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, discon_mp,
	    tcp_accept_finish, eager->tcp_connp, NULL, SQ_FILL,
	    SQTAG_TCP_ACCEPT_FINISH);
}
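
/*
 * Illustrative TPI sequence for the routine above: a TLI client's
 * t_accept(3NSL) sends T_CONN_RES carrying the ACCEPTOR_id of the
 * accepting stream and the SEQ_number from the earlier T_CONN_IND;
 * after tcp_accept_swap() moves the eager onto the acceptor's queues,
 * the listener stream sees a T_OK_ACK (with the local address appended
 * for getsockname()), and any deferred T_CONN_IND is then pushed up.
 */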
/*
 * This is the STREAMS entry point for T_CONN_RES coming down on
 * Acceptor STREAM when sockfs listener does accept processing.
 * Read the block comment on top of tcp_input_listener().
 */
int
tcp_tpi_accept(queue_t *q, mblk_t *mp)
{
	queue_t *rq = RD(q);
	struct T_conn_res *conn_res;
	tcp_t *eager;
	tcp_t *listener;
	struct T_ok_ack *ok;
	t_scalar_t PRIM_type;
	mblk_t *discon_mp;
	conn_t *econnp;
	cred_t *cr;

	ASSERT(DB_TYPE(mp) == M_PROTO);

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, NULL);
	ASSERT(cr != NULL);
	if (cr == NULL) {
		mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL);
		if (mp != NULL)
			putnext(rq, mp);
		return (0);
	}
	conn_res = (struct T_conn_res *)mp->b_rptr;
	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) {
		mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
		if (mp != NULL)
			putnext(rq, mp);
		return (0);
	}
	switch (conn_res->PRIM_type) {
	case O_T_CONN_RES:
	case T_CONN_RES:
		/*
		 * We pass up an err ack if allocb fails. This will
		 * cause sockfs to issue a T_DISCON_REQ which will cause
		 * tcp_eager_blowoff to be called. sockfs will then call
		 * rq->q_qinfo->qi_qclose to cleanup the acceptor stream.
		 * We need to do the allocb up here because we have to
		 * make sure rq->q_qinfo->qi_qclose still points to the
		 * correct function (tcp_tpi_close_accept) in case allocb
		 * fails.
		 */
		bcopy(mp->b_rptr + conn_res->OPT_offset,
		    &eager, conn_res->OPT_length);
		PRIM_type = conn_res->PRIM_type;
		mp->b_datap->db_type = M_PCPROTO;
		mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack);
		ok = (struct T_ok_ack *)mp->b_rptr;
		ok->PRIM_type = T_OK_ACK;
		ok->CORRECT_prim = PRIM_type;
		econnp = eager->tcp_connp;
		econnp->conn_dev = (dev_t)RD(q)->q_ptr;
		econnp->conn_minor_arena = (vmem_t *)(WR(q)->q_ptr);
		econnp->conn_rq = rq;
		econnp->conn_wq = q;
		rq->q_ptr = econnp;
		rq->q_qinfo = &tcp_rinitv4;	/* No open - same as rinitv6 */
		q->q_ptr = econnp;
		q->q_qinfo = &tcp_winit;
		listener = eager->tcp_listener;

		/*
		 * Pre allocate the discon_ind mblk also. tcp_accept_finish
		 * will use it if something failed.
		 */
		discon_mp = allocb(MAX(sizeof (struct T_discon_ind),
		    sizeof (struct stroptions)), BPRI_HI);

		if (discon_mp == NULL) {
			mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
			if (mp != NULL)
				putnext(rq, mp);
			return (0);
		}

		eager->tcp_issocket = B_TRUE;

		ASSERT(econnp->conn_netstack ==
		    listener->tcp_connp->conn_netstack);
		ASSERT(eager->tcp_tcps == listener->tcp_tcps);

		/* Put the ref for IP */
		CONN_INC_REF(econnp);

		/*
		 * We should have minimum of 3 references on the conn
		 * at this point. One each for TCP and IP and one for
		 * the T_conn_ind that was sent up when the 3-way handshake
		 * completed. In the normal case we would also have another
		 * reference (making a total of 4) for the conn being in the
		 * classifier hash list. However the eager could have received
		 * an RST subsequently and tcp_closei_local could have removed
		 * the eager from the classifier hash list, hence we can't
		 * assert that reference.
		 */
		ASSERT(econnp->conn_ref >= 3);

		mutex_enter(&listener->tcp_eager_lock);
		if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
			mblk_t *conn_ind = tcp_get_def_conn_ind(listener);

			/* Need to get inside the listener perimeter */
			CONN_INC_REF(listener->tcp_connp);
			SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
			    conn_ind, tcp_send_pending, listener->tcp_connp,
			    NULL, SQ_FILL, SQTAG_TCP_SEND_PENDING);
		}
		tcp_eager_unlink(eager);
		mutex_exit(&listener->tcp_eager_lock);

		/*
		 * At this point, the eager is detached from the listener
		 * but we still have an extra ref on eager (apart from the
		 * usual tcp references). The ref was placed in tcp_input_data
		 * before sending the conn_ind in tcp_send_conn_ind.
		 * The ref will be dropped in tcp_accept_finish().
		 */
		SQUEUE_ENTER_ONE(econnp->conn_sqp, discon_mp,
		    tcp_accept_finish, econnp, NULL, SQ_NODRAIN,
		    SQTAG_TCP_ACCEPT_FINISH_Q0);

		/*
		 * Send the new local address also up to sockfs. There
		 * should already be enough space in the mp that came
		 * down from soaccept().
		 */
		if (econnp->conn_family == AF_INET) {
			sin_t *sin;

			ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >=
			    (sizeof (struct T_ok_ack) + sizeof (sin_t)));
			sin = (sin_t *)mp->b_wptr;
			mp->b_wptr += sizeof (sin_t);
			sin->sin_family = AF_INET;
			sin->sin_port = econnp->conn_lport;
			sin->sin_addr.s_addr = econnp->conn_laddr_v4;
		} else {
			sin6_t *sin6;

			ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >=
			    sizeof (struct T_ok_ack) + sizeof (sin6_t));
			sin6 = (sin6_t *)mp->b_wptr;
			mp->b_wptr += sizeof (sin6_t);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = econnp->conn_lport;
			sin6->sin6_addr = econnp->conn_laddr_v6;
			if (econnp->conn_ipversion == IPV4_VERSION)
				sin6->sin6_flowinfo = 0;
			else
				sin6->sin6_flowinfo = econnp->conn_flowinfo;
			if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6) &&
			    (econnp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) {
				sin6->sin6_scope_id =
				    econnp->conn_ixa->ixa_scopeid;
			} else {
				sin6->sin6_scope_id = 0;
			}
			sin6->__sin6_src_id = 0;
		}

		putnext(rq, mp);
		break;
	default:
		mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0);
		if (mp != NULL)
			putnext(rq, mp);
		break;
	}
	return (0);
}
/*
 * The function called through squeue to get behind listener's perimeter to
 * send a deferred conn_ind.
 */
/* ARGSUSED */
void
tcp_send_pending(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*lconnp = (conn_t *)arg;
	tcp_t	*listener = lconnp->conn_tcp;
	struct T_conn_ind *conn_ind;
	tcp_t	*tcp;

	conn_ind = (struct T_conn_ind *)mp->b_rptr;
	bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
	    conn_ind->OPT_length);

	if (listener->tcp_state != TCPS_LISTEN) {
		/*
		 * If listener has closed, it would have caused a
		 * cleanup/blowoff to happen for the eager, so
		 * we don't need to do anything more.
		 */
		freemsg(mp);
		return;
	}

	putnext(lconnp->conn_rq, mp);
}
/*
 * Sends the T_CONN_IND to the listener. The caller calls this
 * function via squeue to get inside the listener's perimeter, since
 * once the 3-way handshake is done a T_CONN_IND needs to be
 * sent. As an optimization, the caller can call this directly
 * if the listener's perimeter is the same as the eager's.
 */
/* ARGSUSED */
void
tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2)
{
	conn_t			*lconnp = (conn_t *)arg;
	tcp_t			*listener = lconnp->conn_tcp;
	tcp_t			*tcp;
	struct T_conn_ind	*conn_ind;
	ipaddr_t 		*addr_cache;
	boolean_t		need_send_conn_ind = B_FALSE;
	tcp_stack_t		*tcps = listener->tcp_tcps;

	/* retrieve the eager */
	conn_ind = (struct T_conn_ind *)mp->b_rptr;
	ASSERT(conn_ind->OPT_offset != 0 &&
	    conn_ind->OPT_length == sizeof (intptr_t));
	bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
	    conn_ind->OPT_length);

	/*
	 * TLI/XTI applications will get confused by
	 * sending eager as an option since it violates
	 * the option semantics. So remove the eager as
	 * option since TLI/XTI app doesn't need it anyway.
	 */
	if (!TCP_IS_SOCKET(listener)) {
		conn_ind->OPT_length = 0;
		conn_ind->OPT_offset = 0;
	}
	if (listener->tcp_state != TCPS_LISTEN) {
		/*
		 * If listener has closed, it would have caused a
		 * cleanup/blowoff to happen for the eager. We
		 * just need to return.
		 */
		freemsg(mp);
		return;
	}

	/*
	 * if the conn_req_q is full defer passing up the
	 * T_CONN_IND until space is available after t_accept()
	 * processing
	 */
	mutex_enter(&listener->tcp_eager_lock);

	/*
	 * Take the eager out, if it is in the list of droppable eagers
	 * as we are here because the 3W handshake is over.
	 */
	MAKE_UNDROPPABLE(tcp);

	if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) {
		tcp_t *tail;

		/*
		 * The eager already has an extra ref put in tcp_input_data
		 * so that it stays till accept comes back even though it
		 * might get into TCPS_CLOSED as a result of a TH_RST etc.
		 */
		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
		listener->tcp_conn_req_cnt_q0--;
		listener->tcp_conn_req_cnt_q++;

		/* Move from SYN_RCVD to ESTABLISHED list */
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		tcp->tcp_eager_prev_q0 = NULL;
		tcp->tcp_eager_next_q0 = NULL;

		/*
		 * Insert at end of the queue because sockfs
		 * sends down T_CONN_RES in chronological
		 * order. Leaving the older conn indications
		 * at front of the queue helps reducing search
		 * time.
		 */
		tail = listener->tcp_eager_last_q;
		if (tail != NULL)
			tail->tcp_eager_next_q = tcp;
		else
			listener->tcp_eager_next_q = tcp;
		listener->tcp_eager_last_q = tcp;
		tcp->tcp_eager_next_q = NULL;
		/*
		 * Delay sending up the T_conn_ind until we are
		 * done with the eager. Once we have sent up
		 * the T_conn_ind, the accept can potentially complete
		 * any time and release the refhold we have on the eager.
		 */
		need_send_conn_ind = B_TRUE;
	} else {

		/*
		 * Defer connection on q0 and set deferred
		 * connection bit true
		 */
		tcp->tcp_conn_def_q0 = B_TRUE;

		/* take tcp out of q0 ... */
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;

		/* ... and place it at the end of q0 */
		tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
		tcp->tcp_eager_next_q0 = listener;
		listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
		listener->tcp_eager_prev_q0 = tcp;
		tcp->tcp_conn.tcp_eager_conn_ind = mp;
	}

	/* we have timed out before */
	if (tcp->tcp_syn_rcvd_timeout != 0) {
		tcp->tcp_syn_rcvd_timeout = 0;
		listener->tcp_syn_rcvd_timeout--;
		if (listener->tcp_syn_defense &&
		    listener->tcp_syn_rcvd_timeout <=
		    (tcps->tcps_conn_req_max_q0 >> 5) &&
		    10*MINUTES < TICK_TO_MSEC(ddi_get_lbolt64() -
		    listener->tcp_last_rcv_lbolt)) {
			/*
			 * Turn off the defense mode if we
			 * believe the SYN attack is over.
			 */
			listener->tcp_syn_defense = B_FALSE;
			if (listener->tcp_ip_addr_cache) {
				kmem_free((void *)listener->tcp_ip_addr_cache,
				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
				listener->tcp_ip_addr_cache = NULL;
			}
		}
	}
	addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
	if (addr_cache != NULL) {
		/*
		 * We have finished a 3-way handshake with this
		 * remote host. This proves the IP addr is good.
		 * Cache it!
		 */
		addr_cache[IP_ADDR_CACHE_HASH(tcp->tcp_connp->conn_faddr_v4)] =
		    tcp->tcp_connp->conn_faddr_v4;
	}
	mutex_exit(&listener->tcp_eager_lock);
	if (need_send_conn_ind)
		putnext(lconnp->conn_rq, mp);
}
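
/*
 * Illustrative note on the two listener lists used above: q0 holds
 * eagers still in (or just out of) the SYN_RCVD stage, while q holds
 * those whose T_CONN_IND has been sent up and which await T_CONN_RES.
 * When q is at tcp_conn_req_max the indication is parked on the eager
 * (tcp_conn_def_q0) at the tail of q0 and replayed later by
 * tcp_get_def_conn_ind() once an accept drains the queue.
 */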