2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
37 * Use is subject to license terms.
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
49 #include <sys/thread.h>
52 #include <sys/unistd.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/types.h>
57 #include <sys/sunddi.h>
58 #include <sys/stream.h>
59 #include <sys/strsun.h>
61 #include <sys/class.h>
63 #include <sys/cmn_err.h>
67 #include <netsmb/smb_osdep.h>
69 #include <netsmb/smb.h>
70 #include <netsmb/smb_conn.h>
71 #include <netsmb/smb_rq.h>
72 #include <netsmb/smb_subr.h>
73 #include <netsmb/smb_tran.h>
74 #include <netsmb/smb_trantcp.h>
/*
 * Forward declaration of the IOD echo sender (defined below), plus the
 * file-system callback vector used to notify smbfs of connection events.
 * NOTE(review): this text is extraction-garbled; statements are split
 * across lines and some original lines (braces, bodies) are missing.
 */
48 int smb_iod_send_echo(smb_vc_t
*);
79 * This is set/cleared when smbfs loads/unloads
80 * No locks should be necessary, because smbfs
81 * can't unload until all the mounts are gone.
/* Module-global callback vector; NULL until smbfs registers one. */
83 static smb_fscb_t
*fscb
;
/* Registers (or clears) the callback vector; body lost in extraction. */
85 smb_fscb_set(smb_fscb_t
*cb
)
/*
 * Per-share disconnect handler: invalidates the share state, then runs
 * the smbfs "disconnect" callback for the share if one is registered.
 * Used as a walk function over all shares of a VC (see smb_iod_vc_work).
 * NOTE(review): extraction-garbled text; enclosing braces are missing.
 */
91 smb_iod_share_disconnected(smb_share_t
*ssp
)
94 smb_share_invalidate(ssp
);
/* Notify smbfs, if it registered a disconnect callback. */
97 if (fscb
&& fscb
->fscb_disconn
) {
98 fscb
->fscb_disconn(ssp
);
103 * State changes are important and infrequent.
104 * Make them easily observable via dtrace.
/*
 * Sets the VC connection state.  Kept as a tiny function (rather than a
 * direct assignment at call sites) so dtrace can probe state changes.
 */
107 smb_iod_newstate(struct smb_vc
*vcp
, int state
)
109 vcp
->vc_state
= state
;
112 /* Lock Held version of the next function. */
/*
 * Marks a request as processed: merges flags, records the error,
 * moves the request to SMBRQ_NOTIFIED and wakes any waiters on
 * sr_cond.  Caller must hold the request lock (hence "_LH").
 * NOTE(review): parameter list lines were lost in extraction; the
 * call below shows the arguments are (rqp, error, flags).
 */
114 smb_iod_rqprocessed_LH(
119 rqp
->sr_flags
|= flags
;
120 rqp
->sr_lerror
= error
;
122 rqp
->sr_state
= SMBRQ_NOTIFIED
;
123 cv_broadcast(&rqp
->sr_cond
);
/* Lock-taking wrapper: body fragment of smb_iod_rqprocessed(). */
134 smb_iod_rqprocessed_LH(rqp
, error
, flags
);
/*
 * Invalidates every request queued on this VC, failing each with
 * ENOTCONN and SMBR_RESTART so callers can retry after reconnect.
 * Walks iod_rqlist under iod_rqlock as reader.
 */
139 smb_iod_invrq(struct smb_vc
*vcp
)
144 * Invalidate all outstanding requests for this connection
146 rw_enter(&vcp
->iod_rqlock
, RW_READER
);
147 TAILQ_FOREACH(rqp
, &vcp
->iod_rqlist
, sr_link
) {
148 smb_iod_rqprocessed(rqp
, ENOTCONN
, SMBR_RESTART
);
150 rw_exit(&vcp
->iod_rqlock
);
154 * Called by smb_vc_rele, smb_vc_kill, and by the driver
155 * close entry point if the IOD closes its dev handle.
157 * Forcibly kill the connection and IOD.
/*
 * Tears down the VC: marks it DEAD (broadcasting the state change),
 * drops the transport without a network logoff, and signals the IOD
 * thread (if it is a different thread) so it notices the shutdown.
 */
160 smb_iod_disconnect(struct smb_vc
*vcp
)
164 * Inform everyone of the state change.
167 if (vcp
->vc_state
!= SMBIOD_ST_DEAD
) {
168 smb_iod_newstate(vcp
, SMBIOD_ST_DEAD
);
169 cv_broadcast(&vcp
->vc_statechg
);
174 * Let's be safe here and avoid doing any
175 * call across the network while trying to
176 * shut things down. If we just disconnect,
177 * the server will take care of the logoff.
179 SMB_TRAN_DISCONNECT(vcp
);
182 * If we have an IOD, it should immediately notice
183 * that its connection has closed. But in case
184 * it doesn't, let's also send it a signal.
187 if (vcp
->iod_thr
!= NULL
&&
188 vcp
->iod_thr
!= curthread
) {
189 tsignal(vcp
->iod_thr
, SIGKILL
);
197 * Called by _addrq (for internal requests)
198 * and _sendall (via _addrq, _multirq, _waitrq)
/*
 * Transmits one SMB request on the VC transport.
 * Caller must hold vc_sendlock and iod_rqlock (reader) — see ASSERTs.
 * On first send: assigns the MID and, when message signing is on,
 * the request/response sequence numbers (serialized by vc_sendlock).
 * Retries are bounded (~one minute of attempts) before the request is
 * failed with SMBR_RESTART.  The mblk chain is copied via copymsg()
 * because SMB_TRAN_SEND consumes its argument.
 * NOTE(review): extraction-garbled; several original lines (returns,
 * header-fill and signing calls) are missing from this text.
 */
201 smb_iod_sendrq(struct smb_rq
*rqp
)
203 struct smb_vc
*vcp
= rqp
->sr_vc
;
208 ASSERT(SEMA_HELD(&vcp
->vc_sendlock
));
209 ASSERT(RW_READ_HELD(&vcp
->iod_rqlock
));
212 * Note: Anything special for SMBR_INTERNAL here?
214 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
215 SMBIODEBUG("bad vc_state=%d\n", vcp
->vc_state
);
221 * On the first send, set the MID and (maybe)
222 * the signing sequence numbers. The increments
223 * here are serialized by vc_sendlock
225 if (rqp
->sr_sendcnt
== 0) {
227 rqp
->sr_mid
= vcp
->vc_next_mid
++;
229 if (rqp
->sr_rqflags2
& SMB_FLAGS2_SECURITY_SIGNATURE
) {
231 * We're signing requests and verifying
232 * signatures on responses. Set the
233 * sequence numbers of the request and
234 * response here, used in smb_rq_verify.
236 rqp
->sr_seqno
= vcp
->vc_next_seq
++;
237 rqp
->sr_rseqno
= vcp
->vc_next_seq
++;
240 /* Fill in UID, TID, MID, etc. */
244 * Sign the message now that we're finally done
245 * filling in the SMB header fields, etc.
247 if (rqp
->sr_rqflags2
& SMB_FLAGS2_SECURITY_SIGNATURE
) {
/* Give up after ~60s worth of send attempts. */
251 if (rqp
->sr_sendcnt
++ >= 60/SMBSBTIMO
) { /* one minute */
252 smb_iod_rqprocessed(rqp
, rqp
->sr_lerror
, SMBR_RESTART
);
254 * If all attempts to send a request failed, then
255 * something is seriously hosed.
261 * Replaced m_copym() with Solaris copymsg() which does the same
262 * work when we want to do a M_COPYALL.
263 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
265 m
= copymsg(rqp
->sr_rq
.mb_top
);
268 DTRACE_PROBE2(smb_iod_sendrq
,
269 (smb_rq_t
*), rqp
, (mblk_t
*), m
);
271 SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp
->sr_mid
, 0, 0, 0);
276 error
= SMB_TRAN_SEND(vcp
, m
);
277 m
= 0; /* consumed by SEND */
281 rqp
->sr_lerror
= error
;
/* Success path: mark sent and wake anyone in SENDWAIT. */
284 rqp
->sr_flags
|= SMBR_SENT
;
285 rqp
->sr_state
= SMBRQ_SENT
;
286 if (rqp
->sr_flags
& SMBR_SENDWAIT
)
287 cv_broadcast(&rqp
->sr_cond
);
292 * Check for fatal errors
294 if (SMB_TRAN_FATAL(vcp
, error
)) {
296 * No further attempts should be made
298 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error
);
302 SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error
);
305 /* If proc waiting on rqp was signaled... */
306 if (smb_rq_intr(rqp
))
307 smb_iod_rqprocessed(rqp
, EINTR
, 0);
/*
 * Receives one message from the transport into *mpp.  Pulls up the
 * fixed SMB header and validates the SMB signature bytes before the
 * message is handed to the caller (smb_iod_recvall).
 * NOTE(review): extraction-garbled; error-check lines between the
 * recv, pullup, and signature check are missing from this text.
 */
314 smb_iod_recv1(struct smb_vc
*vcp
, mblk_t
**mpp
)
322 error
= SMB_TRAN_RECV(vcp
, &m
);
/* Ensure the SMB header is contiguous for direct access. */
329 m
= m_pullup(m
, SMB_HDRLEN
);
335 * Check the SMB header
337 hp
= mtod(m
, uchar_t
*);
338 if (bcmp(hp
, SMB_SIGNATURE
, SMB_SIGLEN
) != 0) {
348 * Process incoming packets
350 * This is the "reader" loop, run by the IOD thread
351 * while in state SMBIOD_ST_VCACTIVE. The loop now
352 * simply blocks in the socket recv until either a
353 * message arrives, or a disconnect.
355 * Any non-zero error means the IOD should terminate.
/*
 * Reader loop.  Repeats: check for kill/shutdown, block in
 * smb_iod_recv1(), then either handle a receive timeout (ETIME:
 * "server not responding" notices at 15s, periodic echo probes,
 * and IOD self-termination when it holds the last VC reference)
 * or match the received MID against iod_rqlist and complete the
 * request.  Unmatched responses other than ECHO are logged/dropped.
 * NOTE(review): extraction-garbled; loop braces and several returns
 * are missing from this text.
 */
358 smb_iod_recvall(struct smb_vc
*vcp
)
365 int etime_count
= 0; /* for "server not responding", etc. */
369 * Check whether someone "killed" this VC,
370 * or is asking the IOD to terminate.
373 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
374 SMBIODEBUG("bad vc_state=%d\n", vcp
->vc_state
);
379 if (vcp
->iod_flags
& SMBIOD_SHUTDOWN
) {
380 SMBIODEBUG("SHUTDOWN set\n");
381 /* This IOD thread will terminate. */
383 smb_iod_newstate(vcp
, SMBIOD_ST_DEAD
);
384 cv_broadcast(&vcp
->vc_statechg
);
391 error
= smb_iod_recv1(vcp
, &m
);
393 if (error
== ETIME
&&
394 vcp
->iod_rqlist
.tqh_first
!= NULL
) {
396 * Nothing received for 15 seconds and
397 * we have requests in the queue.
402 * Once, at 15 sec. notify callbacks
403 * and print the warning message.
405 if (etime_count
== 1) {
406 /* Was: smb_iod_notify_down(vcp); */
407 if (fscb
&& fscb
->fscb_down
)
408 smb_vc_walkshares(vcp
,
410 zprintf(vcp
->vc_zoneid
,
411 "SMB server %s not responding\n",
416 * At 30 sec. try sending an echo, and then
417 * once a minute thereafter.
419 if ((etime_count
& 3) == 2) {
420 (void) smb_iod_send_echo(vcp
);
424 } /* ETIME && requests in queue */
426 if (error
== ETIME
) {
428 * If the IOD thread holds the last reference
429 * to this VC, let the IOD thread terminate.
431 if (vcp
->vc_co
.co_usecount
> 1)
434 if (vcp
->vc_co
.co_usecount
== 1) {
435 smb_iod_newstate(vcp
, SMBIOD_ST_DEAD
);
442 } /* error == ETIME */
446 * The recv. above returned some error
447 * we can't continue from i.e. ENOTCONN.
448 * It's dangerous to continue here.
449 * (possible infinite loop!)
451 * If we have requests enqueued, next
452 * state is reconnecting, else idle.
456 state
= (vcp
->iod_rqlist
.tqh_first
!= NULL
) ?
457 SMBIOD_ST_RECONNECT
: SMBIOD_ST_IDLE
;
458 smb_iod_newstate(vcp
, state
);
459 cv_broadcast(&vcp
->vc_statechg
);
466 * Received something. Yea!
471 zprintf(vcp
->vc_zoneid
, "SMB server %s OK\n",
474 /* Was: smb_iod_notify_up(vcp); */
475 if (fscb
&& fscb
->fscb_up
)
476 smb_vc_walkshares(vcp
, fscb
->fscb_up
);
480 * Have an SMB packet. The SMB header was
481 * checked in smb_iod_recv1().
482 * Find the request...
484 hp
= mtod(m
, uchar_t
*);
486 mid
= letohs(SMB_HDRMID(hp
));
487 SMBIODEBUG("mid %04x\n", (uint_t
)mid
);
/* Match the response MID to a queued request. */
489 rw_enter(&vcp
->iod_rqlock
, RW_READER
);
490 TAILQ_FOREACH(rqp
, &vcp
->iod_rqlist
, sr_link
) {
492 if (rqp
->sr_mid
!= mid
)
495 DTRACE_PROBE2(smb_iod_recvrq
,
496 (smb_rq_t
*), rqp
, (mblk_t
*), m
);
500 if (rqp
->sr_rp
.md_top
== NULL
) {
501 md_initm(&rqp
->sr_rp
, m
);
503 if (rqp
->sr_flags
& SMBR_MULTIPACKET
) {
504 md_append_record(&rqp
->sr_rp
, m
);
507 SMBSDEBUG("duplicate response %d "
512 smb_iod_rqprocessed_LH(rqp
, 0, 0);
/* No matching request: drop, but don't complain about ECHO. */
518 int cmd
= SMB_HDRCMD(hp
);
520 if (cmd
!= SMB_COM_ECHO
)
521 SMBSDEBUG("drop resp: mid %d, cmd %d\n",
523 /* smb_printrqlist(vcp); */
526 rw_exit(&vcp
->iod_rqlock
);
534 * The IOD receiver thread has requests pending and
535 * has not received anything in a while. Try to
536 * send an SMB echo request. It's tricky to do a
537 * send from the IOD thread because we can't block.
539 * Using tmo=SMBNOREPLYWAIT in the request
540 * so smb_rq_reply will skip smb_iod_waitrq.
541 * The smb_smb_echo call uses SMBR_INTERNAL
542 * to avoid calling smb_iod_sendall().
/*
 * Sends a fire-and-forget SMB ECHO from the IOD thread using a
 * freshly initialized kernel credential; never blocks for a reply.
 */
545 smb_iod_send_echo(smb_vc_t
*vcp
)
550 smb_credinit(&scred
, NULL
);
551 err
= smb_smb_echo(vcp
, &scred
, SMBNOREPLYWAIT
);
552 smb_credrele(&scred
);
557 * The IOD thread is now just a "reader",
558 * so no more smb_iod_request(). Yea!
562 * Place request in the queue, and send it now if possible.
563 * Called with no locks held.
/*
 * Enqueues a request on the VC and tries to send it.
 * Two paths: (1) requests issued by the IOD thread itself are marked
 * SMBR_INTERNAL, inserted at the head, and sent directly (using
 * sema_tryp on vc_sendlock so the IOD never blocks); (2) normal
 * requests are appended at the tail and sent via smb_iod_sendall(),
 * run only by the thread that observed iod_newrq == 0.
 * NOTE(review): extraction-garbled; returns and some brace lines
 * are missing from this text.
 */
566 smb_iod_addrq(struct smb_rq
*rqp
)
568 struct smb_vc
*vcp
= rqp
->sr_vc
;
569 int error
, save_newrq
;
571 ASSERT(rqp
->sr_cred
);
574 * State should be correct after the check in
575 * smb_rq_enqueue(), but we dropped locks...
577 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
578 SMBIODEBUG("bad vc_state=%d\n", vcp
->vc_state
);
583 * Requests from the IOD itself are marked _INTERNAL,
584 * and get some special treatment to avoid blocking
585 * the reader thread (so we don't deadlock).
586 * The request is not yet on the queue, so we can
587 * modify it's state here without locks.
588 * Only thing using this now is ECHO.
590 rqp
->sr_owner
= curthread
;
591 if (rqp
->sr_owner
== vcp
->iod_thr
) {
592 rqp
->sr_flags
|= SMBR_INTERNAL
;
595 * This is a request from the IOD thread.
596 * Always send directly from this thread.
597 * Note lock order: iod_rqlist, vc_sendlock
599 rw_enter(&vcp
->iod_rqlock
, RW_WRITER
);
600 TAILQ_INSERT_HEAD(&vcp
->iod_rqlist
, rqp
, sr_link
);
601 rw_downgrade(&vcp
->iod_rqlock
);
604 * Note: iod_sendrq expects vc_sendlock,
605 * so take that here, but carefully:
606 * Never block the IOD thread here.
608 if (sema_tryp(&vcp
->vc_sendlock
) == 0) {
609 SMBIODEBUG("sendlock busy\n");
612 /* Have vc_sendlock */
613 error
= smb_iod_sendrq(rqp
);
614 sema_v(&vcp
->vc_sendlock
);
617 rw_exit(&vcp
->iod_rqlock
);
620 * In the non-error case, _removerq
621 * is done by either smb_rq_reply
625 smb_iod_removerq(rqp
);
/* Normal (non-IOD) request path: append at the tail. */
630 rw_enter(&vcp
->iod_rqlock
, RW_WRITER
);
632 TAILQ_INSERT_TAIL(&vcp
->iod_rqlist
, rqp
, sr_link
);
633 /* iod_rqlock/WRITER protects iod_newrq */
634 save_newrq
= vcp
->iod_newrq
;
637 rw_exit(&vcp
->iod_rqlock
);
640 * Now send any requests that need to be sent,
641 * including the one we just put on the list.
642 * Only the thread that found iod_newrq==0
643 * needs to run the send loop.
646 smb_iod_sendall(vcp
);
652 * Mark an SMBR_MULTIPACKET request as
653 * needing another send. Similar to the
654 * "normal" part of smb_iod_addrq.
/*
 * Re-arms an already-queued MULTIPACKET request: resets it to
 * SMBRQ_NOTSENT under iod_rqlock/WRITER and kicks the send loop
 * (only the thread that saw iod_newrq == 0 runs smb_iod_sendall).
 * SMBR_INTERNAL requests are rejected early — MULTIPACKET and
 * INTERNAL are mutually exclusive (see ASSERT below).
 */
657 smb_iod_multirq(struct smb_rq
*rqp
)
659 struct smb_vc
*vcp
= rqp
->sr_vc
;
662 ASSERT(rqp
->sr_flags
& SMBR_MULTIPACKET
);
664 if (rqp
->sr_flags
& SMBR_INTERNAL
)
667 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
668 SMBIODEBUG("bad vc_state=%d\n", vcp
->vc_state
);
672 rw_enter(&vcp
->iod_rqlock
, RW_WRITER
);
674 /* Already on iod_rqlist, just reset state. */
675 rqp
->sr_state
= SMBRQ_NOTSENT
;
677 /* iod_rqlock/WRITER protects iod_newrq */
678 save_newrq
= vcp
->iod_newrq
;
681 rw_exit(&vcp
->iod_rqlock
);
684 * Now send any requests that need to be sent,
685 * including the one we just marked NOTSENT.
686 * Only the thread that found iod_newrq==0
687 * needs to run the send loop.
690 smb_iod_sendall(vcp
);
/*
 * Removes a request from the VC's request list under
 * iod_rqlock/WRITER.  The ASSERT guards against double removal
 * (TAILQ_REMOVE poisons tqe_next with 1L under QUEUEDEBUG).
 */
697 smb_iod_removerq(struct smb_rq
*rqp
)
699 struct smb_vc
*vcp
= rqp
->sr_vc
;
701 rw_enter(&vcp
->iod_rqlock
, RW_WRITER
);
704 * Make sure we have not already removed it.
705 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
706 * XXX: Don't like the constant 1 here...
708 ASSERT(rqp
->sr_link
.tqe_next
!= (void *)1L);
710 TAILQ_REMOVE(&vcp
->iod_rqlist
, rqp
, sr_link
);
711 rw_exit(&vcp
->iod_rqlock
);
717 * Wait for a request to complete.
719 * For normal requests, we need to deal with
720 * ioc_muxcnt dropping below vc_maxmux by
721 * making arrangements to send more...
/*
 * Waits for a request's reply.  Phases:
 *  1. INTERNAL requests are simply dequeued (no reply wait here).
 *  2. Wait for the request to actually be sent (SMBRQ_NOTSENT),
 *     interruptibly unless SMBR_NOINTR_SEND is set.
 *  3. Wait for the reply with per-request timeout sr_timo; an
 *     optional earlier "notice" timeout (smb_timo_notice) prints a
 *     "still waiting" warning to the user first.  SMBR_NOINTR_RECV
 *     selects the non-signalable cv waits.
 * On completion, non-MULTIPACKET requests are dequeued, and the send
 * loop is re-run if the mux limit had been reached (iod_muxfull).
 * Must not be called from the IOD thread itself (see ASSERT).
 * NOTE(review): extraction-garbled; lock acquisition, returns and
 * several brace lines are missing from this text.
 */
724 smb_iod_waitrq(struct smb_rq
*rqp
)
726 struct smb_vc
*vcp
= rqp
->sr_vc
;
727 clock_t tr
, tmo1
, tmo2
;
730 if (rqp
->sr_flags
& SMBR_INTERNAL
) {
731 ASSERT((rqp
->sr_flags
& SMBR_MULTIPACKET
) == 0);
732 smb_iod_removerq(rqp
);
737 * Make sure this is NOT the IOD thread,
738 * or the wait below will stop the reader.
740 ASSERT(curthread
!= vcp
->iod_thr
);
745 * First, wait for the request to be sent. Normally the send
746 * has already happened by the time we get here. However, if
747 * we have more than maxmux entries in the request list, our
748 * request may not be sent until other requests complete.
749 * The wait in this case is due to local I/O demands, so
750 * we don't want the server response timeout to apply.
752 * If a request is allowed to interrupt this wait, then the
753 * request is cancelled and never sent OTW. Some kinds of
754 * requests should never be cancelled (i.e. close) and those
755 * are marked SMBR_NOINTR_SEND so they either go eventually,
756 * or a connection close will terminate them with ENOTCONN.
758 while (rqp
->sr_state
== SMBRQ_NOTSENT
) {
759 rqp
->sr_flags
|= SMBR_SENDWAIT
;
760 if (rqp
->sr_flags
& SMBR_NOINTR_SEND
) {
761 cv_wait(&rqp
->sr_cond
, &rqp
->sr_lock
);
764 rc
= cv_wait_sig(&rqp
->sr_cond
, &rqp
->sr_lock
);
765 rqp
->sr_flags
&= ~SMBR_SENDWAIT
;
767 SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp
);
774 * The request has been sent. Now wait for the response,
775 * with the timeout specified for this request.
776 * Compute all the deadlines now, so we effectively
777 * start the timer(s) after the request is sent.
779 if (smb_timo_notice
&& (smb_timo_notice
< rqp
->sr_timo
))
780 tmo1
= SEC_TO_TICK(smb_timo_notice
);
783 tmo2
= ddi_get_lbolt() + SEC_TO_TICK(rqp
->sr_timo
);
786 * As above, we don't want to allow interrupt for some
787 * requests like open, because we could miss a succesful
788 * response and therefore "leak" a FID. Such requests
789 * are marked SMBR_NOINTR_RECV to prevent that.
791 * If "slow server" warnings are enabled, wait first
792 * for the "notice" timeout, and warn if expired.
794 if (tmo1
&& rqp
->sr_rpgen
== rqp
->sr_rplast
) {
795 if (rqp
->sr_flags
& SMBR_NOINTR_RECV
)
796 tr
= cv_reltimedwait(&rqp
->sr_cond
,
797 &rqp
->sr_lock
, tmo1
, TR_CLOCK_TICK
);
799 tr
= cv_reltimedwait_sig(&rqp
->sr_cond
,
800 &rqp
->sr_lock
, tmo1
, TR_CLOCK_TICK
);
807 DTRACE_PROBE1(smb_iod_waitrq1
,
811 /* Want this to go ONLY to the user. */
812 uprintf("SMB server %s has not responded"
813 " to request %d after %d seconds..."
814 " (still waiting).\n", vcp
->vc_srvname
,
815 rqp
->sr_mid
, smb_timo_notice
);
821 * Keep waiting until tmo2 is expired.
823 while (rqp
->sr_rpgen
== rqp
->sr_rplast
) {
824 if (rqp
->sr_flags
& SMBR_NOINTR_RECV
)
825 tr
= cv_timedwait(&rqp
->sr_cond
,
826 &rqp
->sr_lock
, tmo2
);
828 tr
= cv_timedwait_sig(&rqp
->sr_cond
,
829 &rqp
->sr_lock
, tmo2
);
836 DTRACE_PROBE1(smb_iod_waitrq2
,
840 /* Want this to go ONLY to the user. */
841 uprintf("SMB server %s has not responded"
842 " to request %d after %d seconds..."
843 " (giving up).\n", vcp
->vc_srvname
,
844 rqp
->sr_mid
, rqp
->sr_timo
);
851 error
= rqp
->sr_lerror
;
858 * MULTIPACKET request must stay in the list.
859 * They may need additional responses.
861 if ((rqp
->sr_flags
& SMBR_MULTIPACKET
) == 0)
862 smb_iod_removerq(rqp
);
865 * Some request has been completed.
866 * If we reached the mux limit,
867 * re-run the send loop...
869 if (vcp
->iod_muxfull
)
870 smb_iod_sendall(vcp
);
876 * Shutdown all outstanding I/O requests on the specified share with
877 * ENXIO; used when unmounting a share. (There shouldn't be any for a
878 * non-forced unmount; if this is a forced unmount, we have to shutdown
879 * the requests as part of the unmount process.)
/*
 * Fails every not-yet-notified request belonging to this share with
 * EIO, walking the VC's request list under iod_rqlock as reader.
 * NOTE(review): the header comment above says ENXIO but the visible
 * call passes EIO — confirm against the request-completion contract.
 */
882 smb_iod_shutdown_share(struct smb_share
*ssp
)
884 struct smb_vc
*vcp
= SSTOVC(ssp
);
888 * Loop through the list of requests and shutdown the ones
889 * that are for the specified share.
891 rw_enter(&vcp
->iod_rqlock
, RW_READER
);
892 TAILQ_FOREACH(rqp
, &vcp
->iod_rqlist
, sr_link
) {
893 if (rqp
->sr_state
!= SMBRQ_NOTIFIED
&& rqp
->sr_share
== ssp
)
894 smb_iod_rqprocessed(rqp
, EIO
, 0);
896 rw_exit(&vcp
->iod_rqlock
);
900 * Send all requests that need sending.
901 * Called from _addrq, _multirq, _waitrq
/*
 * Send loop: clears iod_newrq under iod_rqlock/WRITER, downgrades to
 * reader so the IOD can keep completing responses, serializes senders
 * with vc_sendlock, and walks iod_rqlist sending NOTSENT requests —
 * stopping after vc_maxmux entries so no more than vc_maxmux requests
 * are outstanding.  Records whether the mux limit was hit
 * (iod_muxfull) so smb_iod_waitrq re-runs this loop on completions.
 * NOTE(review): extraction-garbled; the iod_newrq clear and the
 * iod_muxfull assignment's left-hand side are among missing lines.
 */
904 smb_iod_sendall(smb_vc_t
*vcp
)
910 * Clear "newrq" to make sure threads adding
911 * new requests will run this function again.
913 rw_enter(&vcp
->iod_rqlock
, RW_WRITER
);
917 * We only read iod_rqlist, so downgrade rwlock.
918 * This allows the IOD to handle responses while
919 * some requesting thread may be blocked in send.
921 rw_downgrade(&vcp
->iod_rqlock
);
924 * Serialize to prevent multiple senders.
925 * Note lock order: iod_rqlock, vc_sendlock
927 sema_p(&vcp
->vc_sendlock
);
930 * Walk the list of requests and send when possible.
931 * We avoid having more than vc_maxmux requests
932 * outstanding to the server by traversing only
933 * vc_maxmux entries into this list. Simple!
935 ASSERT(vcp
->vc_maxmux
> 0);
937 TAILQ_FOREACH(rqp
, &vcp
->iod_rqlist
, sr_link
) {
939 if (vcp
->vc_state
!= SMBIOD_ST_VCACTIVE
) {
940 error
= ENOTCONN
; /* stop everything! */
944 if (rqp
->sr_state
== SMBRQ_NOTSENT
) {
945 error
= smb_iod_sendrq(rqp
);
950 if (++muxcnt
== vcp
->vc_maxmux
) {
951 SMBIODEBUG("muxcnt == vc_maxmux\n");
958 * If we have vc_maxmux requests outstanding,
959 * arrange for _waitrq to call _sendall as
960 * requests are completed.
963 (muxcnt
< vcp
->vc_maxmux
) ? 0 : 1;
965 sema_v(&vcp
->vc_sendlock
);
966 rw_exit(&vcp
->iod_rqlock
);
/*
 * Main work function for a VC's one-and-only IOD thread.
 * Borrows the transport file pointer (getf + SMB_TRAN_LOAN_FP),
 * bumps vc_genid (a possibly-new connection), marks the VC ACTIVE and
 * broadcasts the state change, runs the smbfs "connect" callback if
 * registered, then blocks in the reader loop smb_iod_recvall().
 * When the reader returns (error or no more references), it notifies
 * all shares of the disconnect, recalls the fd loan, and releases the
 * file descriptor.
 * NOTE(review): extraction-garbled; error returns and cleanup-path
 * brace lines are missing from this text.
 */
970 smb_iod_vc_work(struct smb_vc
*vcp
, cred_t
*cr
)
972 struct file
*fp
= NULL
;
976 * This is called by the one-and-only
977 * IOD thread for this VC.
979 ASSERT(vcp
->iod_thr
== curthread
);
982 * Get the network transport file pointer,
983 * and "loan" it to our transport module.
985 if ((fp
= getf(vcp
->vc_tran_fd
)) == NULL
) {
989 if ((err
= SMB_TRAN_LOAN_FP(vcp
, fp
, cr
)) != 0)
993 * In case of reconnect, tell any enqueued requests
997 vcp
->vc_genid
++; /* possibly new connection */
998 smb_iod_newstate(vcp
, SMBIOD_ST_VCACTIVE
);
999 cv_broadcast(&vcp
->vc_statechg
);
1003 * The above cv_broadcast should be sufficient to
1004 * get requests going again.
1006 * If we have a callback function, run it.
1007 * Was: smb_iod_notify_connected()
1009 if (fscb
&& fscb
->fscb_connect
)
1010 smb_vc_walkshares(vcp
, fscb
->fscb_connect
);
1013 * Run the "reader" loop.
1015 err
= smb_iod_recvall(vcp
);
1018 * The reader loop returned, so we must have a
1019 * new state. (disconnected or reconnecting)
1021 * Notify shares of the disconnect.
1022 * Was: smb_iod_notify_disconnect()
1024 smb_vc_walkshares(vcp
, smb_iod_share_disconnected
);
1027 * The reader loop function returns only when
1028 * there's been an error on the connection, or
1029 * this VC has no more references. It also
1030 * updates the state before it returns.
1032 * Tell any requests to give up or restart.
1037 /* Recall the file descriptor loan. */
1038 (void) SMB_TRAN_LOAN_FP(vcp
, NULL
, cr
);
1040 releasef(vcp
->vc_tran_fd
);
1047 * Wait around for someone to ask to use this VC.
1048 * If the VC has only the IOD reference, then
1049 * wait only a minute or so, then drop it.
/*
 * Idle loop for the IOD thread.  While the VC stays IDLE, waits on
 * iod_idle in 15-second (SEC_TO_TICK(15)) interruptible slices; when
 * the IOD holds the last VC reference (co_usecount == 1), marks the
 * VC DEAD so this IOD can terminate.  Must run on the VC's own IOD
 * thread (see ASSERT).
 */
1052 smb_iod_vc_idle(struct smb_vc
*vcp
)
1054 clock_t tr
, delta
= SEC_TO_TICK(15);
1058 * This is called by the one-and-only
1059 * IOD thread for this VC.
1061 ASSERT(vcp
->iod_thr
== curthread
);
1064 while (vcp
->vc_state
== SMBIOD_ST_IDLE
) {
1065 tr
= cv_reltimedwait_sig(&vcp
->iod_idle
, &vcp
->vc_lock
,
1066 delta
, TR_CLOCK_TICK
);
1073 if (vcp
->vc_co
.co_usecount
== 1) {
1074 /* Let this IOD terminate. */
1075 smb_iod_newstate(vcp
, SMBIOD_ST_DEAD
);
1076 /* nobody to cv_broadcast */
1087 * After a failed reconnect attempt, smbiod will
1088 * call this to make current requests error out.
/*
 * Reconnect-failure handler, run on the VC's IOD thread.  If the VC
 * is in RECONNECT state, moves it to RCFAILED (broadcasting so queued
 * requests error out), pauses ~5 seconds (lets requests drain and
 * prevents an immediate retry storm), then settles the VC into IDLE
 * and broadcasts again.
 */
1091 smb_iod_vc_rcfail(struct smb_vc
*vcp
)
1097 * This is called by the one-and-only
1098 * IOD thread for this VC.
1100 ASSERT(vcp
->iod_thr
== curthread
);
1102 if (vcp
->vc_state
!= SMBIOD_ST_RECONNECT
)
1107 smb_iod_newstate(vcp
, SMBIOD_ST_RCFAILED
);
1108 cv_broadcast(&vcp
->vc_statechg
);
1111 * Short wait here for two reasons:
1112 * (1) Give requests a chance to error out.
1113 * (2) Prevent immediate retry.
1115 tr
= cv_reltimedwait_sig(&vcp
->iod_idle
, &vcp
->vc_lock
,
1116 SEC_TO_TICK(5), TR_CLOCK_TICK
);
1120 smb_iod_newstate(vcp
, SMBIOD_ST_IDLE
);
1121 cv_broadcast(&vcp
->vc_statechg
);
1129 * Ask the IOD to reconnect (if not already underway)
1130 * then wait for the reconnect to finish.
1133 smb_iod_reconnect(struct smb_vc
*vcp
)
1139 switch (vcp
->vc_state
) {
1141 case SMBIOD_ST_IDLE
:
1142 smb_iod_newstate(vcp
, SMBIOD_ST_RECONNECT
);
1143 cv_signal(&vcp
->iod_idle
);
1146 case SMBIOD_ST_RECONNECT
:
1147 rv
= cv_wait_sig(&vcp
->vc_statechg
, &vcp
->vc_lock
);
1154 case SMBIOD_ST_VCACTIVE
:
1155 err
= 0; /* success! */
1158 case SMBIOD_ST_RCFAILED
:
1159 case SMBIOD_ST_DEAD
: