2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/malloc.h>
43 #include <sys/unistd.h>
45 #include <sys/mplock2.h>
51 #include "smb_trantcp.h"
/* How long (in multiples of hz) the iod thread idles between wakeups. */
54 #define SMBIOD_SLEEP_TIMO 2
/* Idle interval after which the VC is pinged (see smb_smb_echo use below). */
55 #define SMBIOD_PING_TIMO 60 /* seconds */
/* Accessors for the iod event-list lock (iod_evlock): pointer, lock, unlock,
 * and the interlock form handed to smb_sleep(). */
57 #define SMB_IOD_EVLOCKPTR(iod) (&(iod)->iod_evlock)
58 #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&(iod)->iod_evlock)
59 #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&(iod)->iod_evlock)
60 #define SMB_IOD_EVINTERLOCK(iod) (&(iod)->iod_evlock)
/* Same set of accessors for the request-list lock (iod_rqlock). */
62 #define SMB_IOD_RQLOCKPTR(iod) (&(iod)->iod_rqlock)
63 #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock))
64 #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&(iod)->iod_rqlock)
65 #define SMB_IOD_RQINTERLOCK(iod) (&(iod)->iod_rqlock)
/* Kick the iod thread; iod_flags doubles as its sleep/wakeup channel. */
67 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags)
70 static MALLOC_DEFINE(M_SMBIOD
, "SMBIOD", "SMB network io daemon");
72 static int smb_iod_next
;
74 static int smb_iod_sendall(struct smbiod
*iod
);
75 static int smb_iod_disconnect(struct smbiod
*iod
);
76 static void smb_iod_thread(void *);
/*
 * NOTE(review): the following text is a truncated extraction — interior
 * statements (braces, lock/unlock calls, early returns) are missing per the
 * embedded original line numbering. Comments below describe only what the
 * visible fragments establish; restore from upstream before relying on them.
 */
/*
 * Record completion of a request: store the final error in sr_lerror, move
 * the request to SMBRQ_NOTIFIED, and wake anyone sleeping on sr_state
 * (see smb_iod_waitrq, which sleeps on &rqp->sr_state).
 */
79 smb_iod_rqprocessed(struct smb_rq
*rqp
, int error
)
82 rqp
->sr_lerror
= error
;
84 rqp
->sr_state
= SMBRQ_NOTIFIED
;
85 wakeup(&rqp
->sr_state
);
/*
 * Walk the request list and fail every outstanding request with ENOTCONN.
 * SMBR_INTERNAL requests appear to be flagged SMBR_RESTART as well —
 * TODO(review): the branch body is truncated here; confirm against upstream.
 */
90 smb_iod_invrq(struct smbiod
*iod
)
95 * Invalidate all outstanding requests for this connection
98 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
100 /* this makes no sense whatsoever XXX */
101 if (rqp
->sr_flags
& SMBR_INTERNAL
)
104 rqp
->sr_flags
|= SMBR_RESTART
;
105 smb_iod_rqprocessed(rqp
, ENOTCONN
);
107 SMB_IOD_RQUNLOCK(iod
);
/*
 * Tear down the VC transport: if vc_tdata is NULL there is nothing to do
 * (presumably an early return — truncated here); otherwise disconnect and
 * finalize the transport and clear vc_tdata.
 */
111 smb_iod_closetran(struct smbiod
*iod
)
113 struct smb_vc
*vcp
= iod
->iod_vc
;
114 struct thread
*td
= iod
->iod_td
;
116 if (vcp
->vc_tdata
== NULL
)
118 SMB_TRAN_DISCONNECT(vcp
, td
);
119 SMB_TRAN_DONE(vcp
, td
);
120 vcp
->vc_tdata
= NULL
;
/*
 * Declare the connection dead: mark SMBIOD_ST_DEAD and close the transport.
 * (Upstream also invalidates pending requests here — truncated in this view.)
 */
124 smb_iod_dead(struct smbiod
*iod
)
126 iod
->iod_state
= SMBIOD_ST_DEAD
;
127 smb_iod_closetran(iod
);
/*
 * NOTE(review): truncated extraction — the error checks between the
 * transport calls (and the fail path) are missing per the embedded line
 * numbering. Restore from upstream before relying on this text.
 *
 * Bring the VC up: create/bind/connect the transport, register the iod's
 * flag word as the transport select id, negotiate the SMB dialect, and run
 * session setup. State advances NOTCONN/RECONNECT -> TRANACTIVE -> VCACTIVE.
 * Calling it on an already-active VC is an error (returns ENOTCONN, noted
 * as a questionable choice by the original XXX comment).
 */
132 smb_iod_connect(struct smbiod
*iod
)
134 struct smb_vc
*vcp
= iod
->iod_vc
;
135 struct thread
*td
= iod
->iod_td
;
138 SMBIODEBUG("%d\n", iod
->iod_state
);
139 switch(iod
->iod_state
) {
140 case SMBIOD_ST_VCACTIVE
:
141 SMBERROR("called for already opened connection\n");
144 return ENOTCONN
; /* XXX: last error code ? */
151 error
= SMB_TRAN_CREATE(vcp
, td
);
154 SMBIODEBUG("tcreate\n");
157 error
= SMB_TRAN_BIND(vcp
, vcp
->vc_laddr
, td
);
161 SMBIODEBUG("tbind\n");
163 error
= SMB_TRAN_CONNECT(vcp
, vcp
->vc_paddr
, td
);
166 SMB_TRAN_SETPARAM(vcp
, SMBTP_SELECTID
, &iod
->iod_flags
);
167 iod
->iod_state
= SMBIOD_ST_TRANACTIVE
;
168 SMBIODEBUG("tconnect\n");
170 /* vcp->vc_mid = 0;*/
172 error
= smb_smb_negotiate(vcp
, &iod
->iod_scred
);
175 SMBIODEBUG("snegotiate\n");
177 error
= smb_smb_ssnsetup(vcp
, &iod
->iod_scred
);
180 iod
->iod_state
= SMBIOD_ST_VCACTIVE
;
181 SMBIODEBUG("completed\n");
/*
 * Orderly disconnect: close the SMB session if the VC is active, forget the
 * session uid, close the transport, and drop back to SMBIOD_ST_NOTCONN.
 */
193 smb_iod_disconnect(struct smbiod
*iod
)
195 struct smb_vc
*vcp
= iod
->iod_vc
;
198 if (iod
->iod_state
== SMBIOD_ST_VCACTIVE
) {
199 smb_smb_ssnclose(vcp
, &iod
->iod_scred
);
200 iod
->iod_state
= SMBIOD_ST_TRANACTIVE
;
202 vcp
->vc_smbuid
= SMB_UID_UNKNOWN
;
203 smb_iod_closetran(iod
);
204 iod
->iod_state
= SMBIOD_ST_NOTCONN
;
/*
 * (Re)connect a share on this VC. If the VC is not active, force a
 * reconnect of the VC first (unless it is DEAD). The SMBS_RECONNECTING
 * flag brackets the actual tree connect, and waiters on ss_vcgenid are
 * woken when done.
 */
209 smb_iod_treeconnect(struct smbiod
*iod
, struct smb_share
*ssp
)
213 if (iod
->iod_state
!= SMBIOD_ST_VCACTIVE
) {
214 if (iod
->iod_state
!= SMBIOD_ST_DEAD
)
216 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
217 error
= smb_iod_connect(iod
);
221 SMBIODEBUG("tree reconnect\n");
223 ssp
->ss_flags
|= SMBS_RECONNECTING
;
225 error
= smb_smb_treeconnect(ssp
, &iod
->iod_scred
);
227 ssp
->ss_flags
&= ~SMBS_RECONNECTING
;
229 wakeup(&ssp
->ss_vcgenid
);
/*
 * NOTE(review): truncated extraction — braces, early returns, and several
 * statements are missing per the embedded line numbering. Comments cover
 * only what the visible fragments establish.
 *
 * Transmit a single request. On the first attempt (sr_sendcnt == 0) the
 * TID/UID are patched into the marshalled header and the header length is
 * fixed up; after more than 5 send attempts the request is failed with its
 * last error and flagged SMBR_RESTART. The mbuf chain is copied
 * (m_copym .. M_WAITOK) so the original survives retransmits, then handed
 * to SMB_TRAN_SEND; ENOBUFS if the copy failed. On success the send time is
 * recorded (also in iod_lastrqsent, which drives the ping logic) and the
 * request moves to SMBRQ_SENT. A transport-fatal error path and an
 * interrupt check (EINTR) follow.
 */
234 smb_iod_sendrq(struct smbiod
*iod
, struct smb_rq
*rqp
)
236 struct thread
*td
= iod
->iod_td
;
237 struct smb_vc
*vcp
= iod
->iod_vc
;
238 struct smb_share
*ssp
= rqp
->sr_share
;
242 SMBIODEBUG("iod_state = %d\n", iod
->iod_state
);
243 switch (iod
->iod_state
) {
244 case SMBIOD_ST_NOTCONN
:
245 smb_iod_rqprocessed(rqp
, ENOTCONN
);
248 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
250 case SMBIOD_ST_RECONNECT
:
255 if (rqp
->sr_sendcnt
== 0) {
256 #ifdef movedtoanotherplace
257 if (vcp
->vc_maxmux
!= 0 && iod
->iod_muxcnt
>= vcp
->vc_maxmux
)
260 *rqp
->sr_rqtid
= htole16(ssp
? ssp
->ss_tid
: SMB_TID_UNKNOWN
);
261 *rqp
->sr_rquid
= htole16(vcp
? vcp
->vc_smbuid
: 0);
262 mb_fixhdr(&rqp
->sr_rq
);
263 if (vcp
->vc_hflags2
& SMB_FLAGS2_SECURITY_SIGNATURE
)
266 if (rqp
->sr_sendcnt
++ > 5) {
267 rqp
->sr_flags
|= SMBR_RESTART
;
268 smb_iod_rqprocessed(rqp
, rqp
->sr_lerror
);
270 * If all attempts to send a request failed, then
271 * something is seriously hosed.
275 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp
->sr_mid
, 0, 0, 0);
276 m_dumpm(rqp
->sr_rq
.mb_top
);
277 m
= m_copym(rqp
->sr_rq
.mb_top
, 0, M_COPYALL
, M_WAITOK
);
278 error
= rqp
->sr_lerror
= m
? SMB_TRAN_SEND(vcp
, m
, td
) : ENOBUFS
;
280 getnanotime(&rqp
->sr_timesent
);
281 iod
->iod_lastrqsent
= rqp
->sr_timesent
;
282 rqp
->sr_flags
|= SMBR_SENT
;
283 rqp
->sr_state
= SMBRQ_SENT
;
287 * Check for fatal errors
289 if (SMB_TRAN_FATAL(vcp
, error
)) {
291 * No further attempts should be made
295 if (smb_rq_intr(rqp
))
296 smb_iod_rqprocessed(rqp
, EINTR
);
301 * Process incoming packets
/*
 * Receive loop: pull complete packets from the transport (EWOULDBLOCK ends
 * the loop; transport-fatal errors are handled separately), m_pullup the
 * SMB header, verify the SMB_SIGNATURE magic, then match the header MID
 * against the request list. The first response is attached with md_initm;
 * for SMBR_MULTIPACKET requests later records are appended with
 * md_append_record, while a duplicate response otherwise is logged and
 * ignored. Matched requests are completed via smb_iod_rqprocessed(rqp, 0);
 * unmatched responses are dropped with a log message. A final pass fails
 * requests whose owner has a pending signal (EINTR).
 */
304 smb_iod_recvall(struct smbiod
*iod
)
306 struct smb_vc
*vcp
= iod
->iod_vc
;
307 struct thread
*td
= iod
->iod_td
;
314 switch (iod
->iod_state
) {
315 case SMBIOD_ST_NOTCONN
:
317 case SMBIOD_ST_RECONNECT
:
324 error
= SMB_TRAN_RECV(vcp
, &m
, td
);
325 if (error
== EWOULDBLOCK
)
327 if (SMB_TRAN_FATAL(vcp
, error
)) {
334 SMBERROR("tran return NULL without error\n");
338 m
= m_pullup(m
, SMB_HDRLEN
);
340 continue; /* wait for a good packet */
342 * Now we got an entire and possibly invalid SMB packet.
343 * Be careful while parsing it.
346 hp
= mtod(m
, u_char
*);
347 if (bcmp(hp
, SMB_SIGNATURE
, SMB_SIGLEN
) != 0) {
351 mid
= SMB_HDRMID(hp
);
352 SMBSDEBUG("mid %04x\n", (u_int
)mid
);
354 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
355 if (rqp
->sr_mid
!= mid
)
358 if (rqp
->sr_rp
.md_top
== NULL
) {
359 md_initm(&rqp
->sr_rp
, m
);
361 if (rqp
->sr_flags
& SMBR_MULTIPACKET
) {
362 md_append_record(&rqp
->sr_rp
, m
);
365 SMBERROR("duplicate response %d (ignored)\n", mid
);
370 smb_iod_rqprocessed(rqp
, 0);
373 SMB_IOD_RQUNLOCK(iod
);
375 SMBERROR("drop resp with mid %d\n", (u_int
)mid
);
376 /* smb_printrqlist(vcp);*/
381 * check for interrupts
384 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
385 if (smb_proc_intr(rqp
->sr_cred
->scr_td
)) {
386 smb_iod_rqprocessed(rqp
, EINTR
);
389 SMB_IOD_RQUNLOCK(iod
);
/*
 * NOTE(review): truncated extraction — locking calls and early returns are
 * missing per the embedded line numbering; comments describe only the
 * visible fragments.
 *
 * Queue an event for the iod thread. The event is allocated (M_WAITOK),
 * filled in, and appended to iod_evlist. Async events return right after
 * unlocking; SMBIOD_EV_SYNC events sleep on the event (smb_sleep with PDROP
 * releases the ev lock), then collect ev_error and free the event here —
 * ownership of a sync event returns to the caller after completion.
 */
394 smb_iod_request(struct smbiod
*iod
, int event
, void *ident
)
396 struct smbiod_event
*evp
;
400 evp
= smb_zmalloc(sizeof(*evp
), M_SMBIOD
, M_WAITOK
);
401 evp
->ev_type
= event
;
402 evp
->ev_ident
= ident
;
404 STAILQ_INSERT_TAIL(&iod
->iod_evlist
, evp
, ev_link
);
405 if ((event
& SMBIOD_EV_SYNC
) == 0) {
406 SMB_IOD_EVUNLOCK(iod
);
411 smb_sleep(evp
, SMB_IOD_EVINTERLOCK(iod
), PDROP
, "90evw", 0);
412 error
= evp
->ev_error
;
413 kfree(evp
, M_SMBIOD
);
418 * Place request in the queue.
419 * Request from smbiod have a high priority.
/*
 * Enqueue a request on the iod's list. Requests issued by the iod thread
 * itself (sr_cred's thread == iod_td) are flagged SMBR_INTERNAL, inserted
 * at the HEAD, and sent inline via smb_iod_sendrq, polling with tsleep on
 * iod_flags until sent; on failure the request is removed and sr_lerror
 * returned. Ordinary requests trigger a sync CONNECT event when the VC is
 * not connected, refuse a VC advertising maxmux == 0, wait on iod_muxwant
 * until a mux slot is free (iod_muxcnt < vc_maxmux), and are appended at
 * the TAIL.
 */
422 smb_iod_addrq(struct smb_rq
*rqp
)
424 struct smb_vc
*vcp
= rqp
->sr_vc
;
425 struct smbiod
*iod
= vcp
->vc_iod
;
429 if (rqp
->sr_cred
->scr_td
== iod
->iod_td
) {
430 rqp
->sr_flags
|= SMBR_INTERNAL
;
432 TAILQ_INSERT_HEAD(&iod
->iod_rqlist
, rqp
, sr_link
);
433 SMB_IOD_RQUNLOCK(iod
);
435 if (smb_iod_sendrq(iod
, rqp
) != 0) {
440 * we don't need to lock state field here
442 if (rqp
->sr_state
!= SMBRQ_NOTSENT
)
444 tsleep(&iod
->iod_flags
, 0, "90sndw", hz
);
447 smb_iod_removerq(rqp
);
448 return rqp
->sr_lerror
;
451 switch (iod
->iod_state
) {
452 case SMBIOD_ST_NOTCONN
:
455 error
= smb_iod_request(vcp
->vc_iod
, SMBIOD_EV_CONNECT
| SMBIOD_EV_SYNC
, NULL
);
465 if (vcp
->vc_maxmux
== 0) {
466 SMBERROR("maxmux == 0\n");
469 if (iod
->iod_muxcnt
< vcp
->vc_maxmux
)
472 smb_sleep(&iod
->iod_muxwant
, SMB_IOD_RQINTERLOCK(iod
), 0, "90mux", 0);
475 TAILQ_INSERT_TAIL(&iod
->iod_rqlist
, rqp
, sr_link
);
476 SMB_IOD_RQUNLOCK(iod
);
/*
 * Remove a request from the iod's list. SMBR_INTERNAL requests are simply
 * unlinked under the rq lock. Ordinary requests first wait for any
 * SMBR_XLOCK holder (set by smb_iod_sendall while transmitting) to clear,
 * sleeping with SMBR_XLOCKWANT set, then unlink and wake any thread waiting
 * on iod_muxwant for a free mux slot.
 */
482 smb_iod_removerq(struct smb_rq
*rqp
)
484 struct smb_vc
*vcp
= rqp
->sr_vc
;
485 struct smbiod
*iod
= vcp
->vc_iod
;
488 if (rqp
->sr_flags
& SMBR_INTERNAL
) {
490 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
491 SMB_IOD_RQUNLOCK(iod
);
495 while (rqp
->sr_flags
& SMBR_XLOCK
) {
496 rqp
->sr_flags
|= SMBR_XLOCKWANT
;
497 smb_sleep(rqp
, SMB_IOD_RQINTERLOCK(iod
), 0, "90xrm", 0);
499 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
501 if (iod
->iod_muxwant
) {
503 wakeup(&iod
->iod_muxwant
);
505 SMB_IOD_RQUNLOCK(iod
);
/*
 * NOTE(review): truncated extraction — interior statements are missing per
 * the embedded line numbering; comments describe only the visible
 * fragments.
 *
 * Wait for a request's response. SMBR_INTERNAL requests (issued by the iod
 * thread itself) are serviced by polling smb_iod_sendall/smb_iod_recvall
 * directly, sleeping on iod_flags between rounds until the response
 * generation advances (sr_rpgen != sr_rplast); the request is then removed
 * and sr_lerror returned. Ordinary requests sleep on sr_state (woken by
 * smb_iod_rqprocessed). SMBR_MULTIPACKET requests stay queued for further
 * records and are reinserted at the tail so other waiters get a chance;
 * otherwise the request is removed.
 */
510 smb_iod_waitrq(struct smb_rq
*rqp
)
512 struct smbiod
*iod
= rqp
->sr_vc
->vc_iod
;
516 if (rqp
->sr_flags
& SMBR_INTERNAL
) {
518 smb_iod_sendall(iod
);
519 smb_iod_recvall(iod
);
520 if (rqp
->sr_rpgen
!= rqp
->sr_rplast
)
522 tsleep(&iod
->iod_flags
, 0, "90irq", hz
);
524 smb_iod_removerq(rqp
);
525 return rqp
->sr_lerror
;
529 if (rqp
->sr_rpgen
== rqp
->sr_rplast
)
530 smb_sleep(&rqp
->sr_state
, SMBRQ_INTERLOCK(rqp
), 0, "90wrq", 0);
533 error
= rqp
->sr_lerror
;
534 if (rqp
->sr_flags
& SMBR_MULTIPACKET
) {
536 * If request should stay in the list, then reinsert it
537 * at the end of queue so other waiters have chance to concur
540 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
541 TAILQ_INSERT_TAIL(&iod
->iod_rqlist
, rqp
, sr_link
);
542 SMB_IOD_RQUNLOCK(iod
);
544 smb_iod_removerq(rqp
);
/*
 * Walk the request list and transmit whatever is sendable. While a request
 * is being sent the rq lock is dropped, so the entry is pinned with
 * SMBR_XLOCK and waiters (smb_iod_removerq) are flagged via SMBR_XLOCKWANT.
 * Sent requests are also checked for timeout: the transport timeout is
 * doubled (timespecadd of tstimeout with itself) and compared against the
 * send timestamp; stale requests are failed with ETIMEDOUT. An ENOTCONN
 * from smb_iod_sendrq is handled after the loop (truncated here —
 * presumably marks the connection dead; confirm against upstream).
 */
550 smb_iod_sendall(struct smbiod
*iod
)
552 struct smb_vc
*vcp
= iod
->iod_vc
;
554 struct timespec ts
, tstimeout
;
559 * Loop through the list of requests and send them if possible
562 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
563 switch (rqp
->sr_state
) {
565 rqp
->sr_flags
|= SMBR_XLOCK
;
566 SMB_IOD_RQUNLOCK(iod
);
567 herror
= smb_iod_sendrq(iod
, rqp
);
569 rqp
->sr_flags
&= ~SMBR_XLOCK
;
570 if (rqp
->sr_flags
& SMBR_XLOCKWANT
) {
571 rqp
->sr_flags
&= ~SMBR_XLOCKWANT
;
576 SMB_TRAN_GETPARAM(vcp
, SMBTP_TIMEOUT
, &tstimeout
);
577 timespecadd(&tstimeout
, &tstimeout
);
579 timespecsub(&ts
, &tstimeout
);
580 if (timespeccmp(&ts
, &rqp
->sr_timesent
, >)) {
581 smb_iod_rqprocessed(rqp
, ETIMEDOUT
);
590 SMB_IOD_RQUNLOCK(iod
);
591 if (herror
== ENOTCONN
)
/*
 * NOTE(review): truncated extraction — loop structure, break statements,
 * and lock calls are missing per the embedded line numbering; comments
 * describe only the visible fragments.
 *
 * One iteration of the daemon's work: drain the event list (events being
 * handled are tagged SMBIOD_EV_PROCESSING), dispatch by event type —
 * CONNECT forces SMBIOD_ST_RECONNECT then smb_iod_connect; DISCONNECT,
 * TREECONNECT, SHUTDOWN, NEWRQ as named — storing results in ev_error.
 * Sync events are handed back to their waiter (who frees them); async
 * events are freed here. When the VC is active and idle longer than
 * iod_pingtimo since iod_lastrqsent, the server is pinged with
 * smb_smb_echo. Finally pending requests are transmitted and responses
 * collected.
 */
597 * "main" function for smbiod daemon
600 smb_iod_main(struct smbiod
*iod
)
602 /* struct smb_vc *vcp = iod->iod_vc;*/
603 struct smbiod_event
*evp
;
605 struct timespec tsnow
;
611 * Check all interesting events
615 evp
= STAILQ_FIRST(&iod
->iod_evlist
);
617 SMB_IOD_EVUNLOCK(iod
);
620 STAILQ_REMOVE_HEAD(&iod
->iod_evlist
, ev_link
);
621 evp
->ev_type
|= SMBIOD_EV_PROCESSING
;
622 SMB_IOD_EVUNLOCK(iod
);
623 switch (evp
->ev_type
& SMBIOD_EV_MASK
) {
624 case SMBIOD_EV_CONNECT
:
625 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
626 evp
->ev_error
= smb_iod_connect(iod
);
628 case SMBIOD_EV_DISCONNECT
:
629 evp
->ev_error
= smb_iod_disconnect(iod
);
631 case SMBIOD_EV_TREECONNECT
:
632 evp
->ev_error
= smb_iod_treeconnect(iod
, evp
->ev_ident
);
634 case SMBIOD_EV_SHUTDOWN
:
635 iod
->iod_flags
|= SMBIOD_SHUTDOWN
;
637 case SMBIOD_EV_NEWRQ
:
640 if (evp
->ev_type
& SMBIOD_EV_SYNC
) {
643 SMB_IOD_EVUNLOCK(iod
);
645 kfree(evp
, M_SMBIOD
);
648 if (iod
->iod_state
== SMBIOD_ST_VCACTIVE
) {
650 timespecsub(&tsnow
, &iod
->iod_pingtimo
);
651 if (timespeccmp(&tsnow
, &iod
->iod_lastrqsent
, >)) {
652 smb_smb_echo(vcp
, &iod
->iod_scred
);
656 smb_iod_sendall(iod
);
657 smb_iod_recvall(iod
);
/* Compatibility shims mapping to this port's kthread primitives. */
661 #define kthread_create_compat smb_kthread_create
662 #define kthread_exit_compat smb_kthread_exit
/*
 * Kernel thread body: set up the iod's credentials, then loop running the
 * main work function until SMBIOD_SHUTDOWN is set in iod_flags, sleeping
 * iod_sleeptimo ticks on iod_flags between rounds (smb_iod_wakeup kicks
 * this channel). Exits via kthread_exit_compat.
 */
665 smb_iod_thread(void *arg
)
667 struct smbiod
*iod
= arg
;
670 * mplock not held on entry but we aren't mpsafe yet.
674 smb_makescred(&iod
->iod_scred
, iod
->iod_td
, NULL
);
675 while ((iod
->iod_flags
& SMBIOD_SHUTDOWN
) == 0) {
677 SMBIODEBUG("going to sleep for %d ticks\n", iod
->iod_sleeptimo
);
678 if (iod
->iod_flags
& SMBIOD_SHUTDOWN
)
680 tsleep(&iod
->iod_flags
, 0, "90idle", iod
->iod_sleeptimo
);
682 kthread_exit_compat();
/*
 * NOTE(review): truncated extraction — error-check branches and returns
 * are missing per the embedded line numbering; comments describe only the
 * visible fragments.
 *
 * Allocate and start an iod for a VC: zero-allocate the smbiod, assign it
 * the next sequential id, initialize state (NOTCONN), sleep/ping timeouts,
 * the request and event lists with their locks, then spawn the
 * smb_iod_thread kernel thread ("smbiod%d"). On spawn failure the iod is
 * logged and freed; on success iod_td is resolved from the new process's
 * only lwp.
 */
686 smb_iod_create(struct smb_vc
*vcp
)
689 struct proc
*newp
= NULL
;
692 iod
= smb_zmalloc(sizeof(*iod
), M_SMBIOD
, M_WAITOK
);
693 iod
->iod_id
= smb_iod_next
++;
694 iod
->iod_state
= SMBIOD_ST_NOTCONN
;
696 iod
->iod_sleeptimo
= hz
* SMBIOD_SLEEP_TIMO
;
697 iod
->iod_pingtimo
.tv_sec
= SMBIOD_PING_TIMO
;
698 getnanotime(&iod
->iod_lastrqsent
);
700 smb_sl_init(&iod
->iod_rqlock
, "90rql");
701 TAILQ_INIT(&iod
->iod_rqlist
);
702 smb_sl_init(&iod
->iod_evlock
, "90evl");
703 STAILQ_INIT(&iod
->iod_evlist
);
704 error
= kthread_create_compat(smb_iod_thread
, iod
, &newp
,
705 RFNOWAIT
, "smbiod%d", iod
->iod_id
);
707 SMBERROR("can't start smbiod: %d", error
);
708 kfree(iod
, M_SMBIOD
);
712 iod
->iod_td
= ONLY_LWP_IN_PROC(newp
)->lwp_thread
;
/*
 * Tear down an iod: post a synchronous SHUTDOWN event (returns once the
 * thread has acted on it), destroy both locks, and free the structure.
 */
717 smb_iod_destroy(struct smbiod
*iod
)
719 smb_iod_request(iod
, SMBIOD_EV_SHUTDOWN
| SMBIOD_EV_SYNC
, NULL
);
720 smb_sl_destroy(&iod
->iod_rqlock
);
721 smb_sl_destroy(&iod
->iod_evlock
);
722 kfree(iod
, M_SMBIOD
);