2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
33 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.15 2007/02/03 17:05:58 corecode Exp $
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/malloc.h>
43 #include <sys/unistd.h>
49 #include "smb_trantcp.h"
/*
 * Timing constants and lock convenience macros for the SMB I/O daemon.
 * NOTE(review): the numeric prefix on each line is an extraction artifact
 * (the original file's line numbering fused into the text) — the whole
 * file needs re-extraction before it can compile.
 */
52 #define SMBIOD_SLEEP_TIMO 2
53 #define SMBIOD_PING_TIMO 60 /* seconds */
/* Event-list lock: serializes access to iod_evlist. */
55 #define SMB_IOD_EVLOCKPTR(iod) (&(iod)->iod_evlock)
56 #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&(iod)->iod_evlock)
57 #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&(iod)->iod_evlock)
58 #define SMB_IOD_EVINTERLOCK(iod) (&(iod)->iod_evlock)
/* Request-list lock: serializes access to iod_rqlist. */
60 #define SMB_IOD_RQLOCKPTR(iod) (&(iod)->iod_rqlock)
61 #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock))
62 #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&(iod)->iod_rqlock)
63 #define SMB_IOD_RQINTERLOCK(iod) (&(iod)->iod_rqlock)
/* Wake the iod thread sleeping on its flags word. */
65 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags)
/* Kernel malloc tag for all iod-related allocations. */
68 static MALLOC_DEFINE(M_SMBIOD
, "SMBIOD", "SMB network io daemon");
/* Monotonically increasing id handed to each new iod instance. */
70 static int smb_iod_next
;
/* Forward declarations for functions used before their definitions. */
72 static int smb_iod_sendall(struct smbiod
*iod
);
73 static int smb_iod_disconnect(struct smbiod
*iod
);
74 static void smb_iod_thread(void *);
/*
 * Complete a request: record its final error code, move it to the
 * SMBRQ_NOTIFIED state, and wake any thread sleeping on sr_state.
 * NOTE(review): return type, braces and any locking lines are missing
 * from this extraction — confirm against the original source.
 */
77 smb_iod_rqprocessed(struct smb_rq
*rqp
, int error
)
80 rqp
->sr_lerror
= error
;
82 rqp
->sr_state
= SMBRQ_NOTIFIED
;
83 wakeup(&rqp
->sr_state
);
/*
 * Invalidate every outstanding request on this connection: each queued
 * request is flagged SMBR_RESTART and completed with ENOTCONN.
 * NOTE(review): the RQLOCK acquisition and loop braces are missing from
 * this extraction; only the matching SMB_IOD_RQUNLOCK is visible.
 */
88 smb_iod_invrq(struct smbiod
*iod
)
93 * Invalidate all outstanding requests for this connection
96 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
98 /* this makes no sense whatsoever XXX */
99 if (rqp
->sr_flags
& SMBR_INTERNAL
)
102 rqp
->sr_flags
|= SMBR_RESTART
;
103 smb_iod_rqprocessed(rqp
, ENOTCONN
);
105 SMB_IOD_RQUNLOCK(iod
);
/*
 * Tear down the transport layer of the virtual circuit: disconnect,
 * release the transport, and clear vc_tdata. A NULL vc_tdata means the
 * transport is already gone (the early-return body is missing here).
 */
109 smb_iod_closetran(struct smbiod
*iod
)
111 struct smb_vc
*vcp
= iod
->iod_vc
;
112 struct thread
*td
= iod
->iod_td
;
/* Nothing to do if the transport was never created / already closed. */
114 if (vcp
->vc_tdata
== NULL
)
116 SMB_TRAN_DISCONNECT(vcp
, td
);
117 SMB_TRAN_DONE(vcp
, td
);
118 vcp
->vc_tdata
= NULL
;
/*
 * Declare the connection dead: mark the iod state SMBIOD_ST_DEAD and
 * close the underlying transport. NOTE(review): the original also
 * invalidates pending requests here — those lines are missing from
 * this extraction.
 */
122 smb_iod_dead(struct smbiod
*iod
)
124 iod
->iod_state
= SMBIOD_ST_DEAD
;
125 smb_iod_closetran(iod
);
/*
 * Establish the virtual circuit: create/bind/connect the transport,
 * then run the SMB negotiate and session-setup exchanges. On success
 * the iod ends in SMBIOD_ST_VCACTIVE. `ithrow()` presumably bails out
 * on error — TODO confirm; the error path and several case labels are
 * missing from this extraction.
 */
130 smb_iod_connect(struct smbiod
*iod
)
132 struct smb_vc
*vcp
= iod
->iod_vc
;
133 struct thread
*td
= iod
->iod_td
;
136 SMBIODEBUG("%d\n", iod
->iod_state
);
137 switch(iod
->iod_state
) {
/* Connecting an already-active VC is a caller bug. */
138 case SMBIOD_ST_VCACTIVE
:
139 SMBERROR("called for already opened connection\n");
142 return ENOTCONN
; /* XXX: last error code ? */
/* Transport bring-up: create, bind local address, connect peer. */
149 ithrow(SMB_TRAN_CREATE(vcp
, td
));
150 SMBIODEBUG("tcreate\n");
152 ithrow(SMB_TRAN_BIND(vcp
, vcp
->vc_laddr
, td
));
154 SMBIODEBUG("tbind\n");
155 ithrow(SMB_TRAN_CONNECT(vcp
, vcp
->vc_paddr
, td
));
156 SMB_TRAN_SETPARAM(vcp
, SMBTP_SELECTID
, &iod
->iod_flags
);
157 iod
->iod_state
= SMBIOD_ST_TRANACTIVE
;
158 SMBIODEBUG("tconnect\n");
159 /* vcp->vc_mid = 0;*/
/* SMB-level handshake: protocol negotiation, then session setup. */
160 ithrow(smb_smb_negotiate(vcp
, &iod
->iod_scred
));
161 SMBIODEBUG("snegotiate\n");
162 ithrow(smb_smb_ssnsetup(vcp
, &iod
->iod_scred
));
163 iod
->iod_state
= SMBIOD_ST_VCACTIVE
;
164 SMBIODEBUG("completed\n");
/*
 * Orderly disconnect: close the SMB session if the VC is active, forget
 * the session uid, shut the transport down, and return the iod to the
 * SMBIOD_ST_NOTCONN state.
 */
174 smb_iod_disconnect(struct smbiod
*iod
)
176 struct smb_vc
*vcp
= iod
->iod_vc
;
179 if (iod
->iod_state
== SMBIOD_ST_VCACTIVE
) {
180 smb_smb_ssnclose(vcp
, &iod
->iod_scred
);
181 iod
->iod_state
= SMBIOD_ST_TRANACTIVE
;
/* Session uid is no longer valid once the session is closed. */
183 vcp
->vc_smbuid
= SMB_UID_UNKNOWN
;
184 smb_iod_closetran(iod
);
185 iod
->iod_state
= SMBIOD_ST_NOTCONN
;
/*
 * (Re)connect a share. If the VC is not active, force a reconnect of
 * the whole circuit first. The SMBS_RECONNECTING flag guards the share
 * while the tree-connect exchange runs; waiters sleeping on ss_vcgenid
 * are woken when it completes. Error-return lines between the visible
 * statements are missing from this extraction.
 */
190 smb_iod_treeconnect(struct smbiod
*iod
, struct smb_share
*ssp
)
194 if (iod
->iod_state
!= SMBIOD_ST_VCACTIVE
) {
195 if (iod
->iod_state
!= SMBIOD_ST_DEAD
)
197 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
198 error
= smb_iod_connect(iod
);
202 SMBIODEBUG("tree reconnect\n");
204 ssp
->ss_flags
|= SMBS_RECONNECTING
;
206 error
= smb_smb_treeconnect(ssp
, &iod
->iod_scred
);
208 ssp
->ss_flags
&= ~SMBS_RECONNECTING
;
210 wakeup(&ssp
->ss_vcgenid
);
/*
 * Try to transmit one request on the wire. First-time sends stamp the
 * tree id and user id into the SMB header; a request that has failed
 * more than 5 send attempts is flagged SMBR_RESTART and completed with
 * its last error. The mbuf chain is copied (m_copym) so the original
 * survives for retransmission. NOTE(review): several case bodies,
 * braces and return statements are missing from this extraction.
 */
215 smb_iod_sendrq(struct smbiod
*iod
, struct smb_rq
*rqp
)
217 struct thread
*td
= iod
->iod_td
;
218 struct smb_vc
*vcp
= iod
->iod_vc
;
219 struct smb_share
*ssp
= rqp
->sr_share
;
223 SMBIODEBUG("iod_state = %d\n", iod
->iod_state
);
224 switch (iod
->iod_state
) {
/* Not connected: fail the request immediately with ENOTCONN. */
225 case SMBIOD_ST_NOTCONN
:
226 smb_iod_rqprocessed(rqp
, ENOTCONN
);
229 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
231 case SMBIOD_ST_RECONNECT
:
/* First transmission attempt: finalize the SMB header. */
236 if (rqp
->sr_sendcnt
== 0) {
237 #ifdef movedtoanotherplace
238 if (vcp
->vc_maxmux
!= 0 && iod
->iod_muxcnt
>= vcp
->vc_maxmux
)
/* Stamp tree id and session uid into the request header. */
241 *rqp
->sr_rqtid
= htoles(ssp
? ssp
->ss_tid
: SMB_TID_UNKNOWN
);
242 *rqp
->sr_rquid
= htoles(vcp
? vcp
->vc_smbuid
: 0);
243 mb_fixhdr(&rqp
->sr_rq
);
/* Give up after more than 5 send attempts. */
245 if (rqp
->sr_sendcnt
++ > 5) {
246 rqp
->sr_flags
|= SMBR_RESTART
;
247 smb_iod_rqprocessed(rqp
, rqp
->sr_lerror
);
249 * If all attempts to send a request failed, then
250 * something is seriously hosed.
254 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp
->sr_mid
, 0, 0, 0);
255 m_dumpm(rqp
->sr_rq
.mb_top
);
/* Copy the chain so the original can be resent on failure. */
256 m
= m_copym(rqp
->sr_rq
.mb_top
, 0, M_COPYALL
, MB_WAIT
);
257 error
= rqp
->sr_lerror
= m
? SMB_TRAN_SEND(vcp
, m
, td
) : ENOBUFS
;
/* Sent OK: record the timestamp and advance request state. */
259 getnanotime(&rqp
->sr_timesent
);
260 iod
->iod_lastrqsent
= rqp
->sr_timesent
;
261 rqp
->sr_flags
|= SMBR_SENT
;
262 rqp
->sr_state
= SMBRQ_SENT
;
266 * Check for fatal errors
268 if (SMB_TRAN_FATAL(vcp
, error
)) {
270 * No further attempts should be made
274 if (smb_rq_intr(rqp
))
275 smb_iod_rqprocessed(rqp
, EINTR
);
280 * Process incoming packets
/*
 * Drain the transport: for each received mbuf chain, validate the SMB
 * signature, extract the mid, and match it against the pending-request
 * list, attaching the reply (or appending, for multi-packet requests)
 * and completing the request. Unmatched replies are logged and dropped.
 * Finally, requests whose owning thread has a pending signal are
 * completed with EINTR. NOTE(review): loop header, lock acquisitions,
 * break/continue lines and m_freem calls are missing here.
 */
283 smb_iod_recvall(struct smbiod
*iod
)
285 struct smb_vc
*vcp
= iod
->iod_vc
;
286 struct thread
*td
= iod
->iod_td
;
293 switch (iod
->iod_state
) {
294 case SMBIOD_ST_NOTCONN
:
296 case SMBIOD_ST_RECONNECT
:
303 error
= SMB_TRAN_RECV(vcp
, &m
, td
);
304 if (error
== EWOULDBLOCK
)
306 if (SMB_TRAN_FATAL(vcp
, error
)) {
313 SMBERROR("tran return NULL without error\n");
/* Make sure the SMB header is contiguous before parsing it. */
317 m
= m_pullup(m
, SMB_HDRLEN
);
319 continue; /* wait for a good packet */
321 * Now we got an entire and possibly invalid SMB packet.
322 * Be careful while parsing it.
325 hp
= mtod(m
, u_char
*);
/* Discard anything that does not start with the SMB signature. */
326 if (bcmp(hp
, SMB_SIGNATURE
, SMB_SIGLEN
) != 0) {
330 mid
= SMB_HDRMID(hp
);
331 SMBSDEBUG("mid %04x\n", (u_int
)mid
);
/* Match the reply mid against outstanding requests. */
333 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
334 if (rqp
->sr_mid
!= mid
)
337 if (rqp
->sr_rp
.md_top
== NULL
) {
338 md_initm(&rqp
->sr_rp
, m
);
340 if (rqp
->sr_flags
& SMBR_MULTIPACKET
) {
341 md_append_record(&rqp
->sr_rp
, m
);
344 SMBERROR("duplicate response %d (ignored)\n", mid
);
349 smb_iod_rqprocessed(rqp
, 0);
352 SMB_IOD_RQUNLOCK(iod
);
354 SMBERROR("drop resp with mid %d\n", (u_int
)mid
);
355 /* smb_printrqlist(vcp);*/
360 * check for interrupts
363 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
364 if (smb_proc_intr(rqp
->sr_cred
->scr_td
)) {
365 smb_iod_rqprocessed(rqp
, EINTR
);
368 SMB_IOD_RQUNLOCK(iod
);
/*
 * Post an event to the iod thread. Asynchronous events return right
 * after queueing; SMBIOD_EV_SYNC events sleep until the iod thread has
 * processed the event, then collect ev_error and free the event. The
 * EVLOCK acquisition and smb_iod_wakeup calls are missing from this
 * extraction — confirm against the original source.
 */
373 smb_iod_request(struct smbiod
*iod
, int event
, void *ident
)
375 struct smbiod_event
*evp
;
379 evp
= smb_zmalloc(sizeof(*evp
), M_SMBIOD
, M_WAITOK
);
380 evp
->ev_type
= event
;
381 evp
->ev_ident
= ident
;
383 STAILQ_INSERT_TAIL(&iod
->iod_evlist
, evp
, ev_link
);
/* Async event: queue it and return without waiting. */
384 if ((event
& SMBIOD_EV_SYNC
) == 0) {
385 SMB_IOD_EVUNLOCK(iod
);
/* Sync event: sleep until processed (PDROP releases the interlock). */
390 smb_sleep(evp
, SMB_IOD_EVINTERLOCK(iod
), PDROP
, "90evw", 0);
391 error
= evp
->ev_error
;
392 kfree(evp
, M_SMBIOD
);
397 * Place request in the queue.
398 * Request from smbiod have a high priority.
/*
 * Enqueue a request for transmission. Requests issued by the iod
 * thread itself are marked SMBR_INTERNAL, placed at the head of the
 * list and sent synchronously here. Ordinary requests trigger a
 * connect event when the VC is down, honor the vc_maxmux concurrency
 * limit (sleeping on iod_muxwant when full), and go to the tail.
 * NOTE(review): lock calls, return statements and several case bodies
 * are missing from this extraction.
 */
401 smb_iod_addrq(struct smb_rq
*rqp
)
403 struct smb_vc
*vcp
= rqp
->sr_vc
;
404 struct smbiod
*iod
= vcp
->vc_iod
;
/* Request from the iod thread itself: high priority, send inline. */
408 if (rqp
->sr_cred
->scr_td
== iod
->iod_td
) {
409 rqp
->sr_flags
|= SMBR_INTERNAL
;
411 TAILQ_INSERT_HEAD(&iod
->iod_rqlist
, rqp
, sr_link
);
412 SMB_IOD_RQUNLOCK(iod
);
414 if (smb_iod_sendrq(iod
, rqp
) != 0) {
419 * we don't need to lock state field here
421 if (rqp
->sr_state
!= SMBRQ_NOTSENT
)
423 tsleep(&iod
->iod_flags
, 0, "90sndw", hz
);
426 smb_iod_removerq(rqp
);
427 return rqp
->sr_lerror
;
430 switch (iod
->iod_state
) {
/* VC not connected: ask the iod thread to connect, synchronously. */
431 case SMBIOD_ST_NOTCONN
:
434 error
= smb_iod_request(vcp
->vc_iod
, SMBIOD_EV_CONNECT
| SMBIOD_EV_SYNC
, NULL
);
444 if (vcp
->vc_maxmux
== 0) {
445 SMBERROR("maxmux == 0\n");
/* Respect the negotiated multiplex limit before queueing. */
448 if (iod
->iod_muxcnt
< vcp
->vc_maxmux
)
451 smb_sleep(&iod
->iod_muxwant
, SMB_IOD_RQINTERLOCK(iod
), 0, "90mux", 0);
454 TAILQ_INSERT_TAIL(&iod
->iod_rqlist
, rqp
, sr_link
);
455 SMB_IOD_RQUNLOCK(iod
);
/*
 * Remove a request from the iod queue. For ordinary requests, wait out
 * any transient SMBR_XLOCK (held by smb_iod_sendall while it sends),
 * then unlink and wake a thread sleeping on the mux limit if one is
 * waiting. Lock acquisitions and the muxcnt decrement are missing from
 * this extraction.
 */
461 smb_iod_removerq(struct smb_rq
*rqp
)
463 struct smb_vc
*vcp
= rqp
->sr_vc
;
464 struct smbiod
*iod
= vcp
->vc_iod
;
/* Internal (iod-issued) requests: just unlink and return. */
467 if (rqp
->sr_flags
& SMBR_INTERNAL
) {
469 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
470 SMB_IOD_RQUNLOCK(iod
);
/* Wait until the sender drops its transient exclusive lock. */
474 while (rqp
->sr_flags
& SMBR_XLOCK
) {
475 rqp
->sr_flags
|= SMBR_XLOCKWANT
;
476 smb_sleep(rqp
, SMB_IOD_RQINTERLOCK(iod
), 0, "90xrm", 0);
478 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
/* Free a slot under the mux limit: wake a waiter if any. */
480 if (iod
->iod_muxwant
) {
482 wakeup(&iod
->iod_muxwant
);
484 SMB_IOD_RQUNLOCK(iod
);
/*
 * Wait for a request's reply. Internal requests are driven directly by
 * polling sendall/recvall from the caller; ordinary requests sleep on
 * sr_state until a new reply generation arrives. Multi-packet requests
 * are re-queued at the tail (so other waiters get a turn) instead of
 * being removed. Lock calls and loop braces are missing from this
 * extraction.
 */
489 smb_iod_waitrq(struct smb_rq
*rqp
)
491 struct smbiod
*iod
= rqp
->sr_vc
->vc_iod
;
/* Internal request: pump the iod state machine ourselves. */
495 if (rqp
->sr_flags
& SMBR_INTERNAL
) {
497 smb_iod_sendall(iod
);
498 smb_iod_recvall(iod
);
499 if (rqp
->sr_rpgen
!= rqp
->sr_rplast
)
501 tsleep(&iod
->iod_flags
, 0, "90irq", hz
);
503 smb_iod_removerq(rqp
);
504 return rqp
->sr_lerror
;
/* Ordinary request: sleep until the reply generation advances. */
508 if (rqp
->sr_rpgen
== rqp
->sr_rplast
)
509 smb_sleep(&rqp
->sr_state
, SMBRQ_INTERLOCK(rqp
), 0, "90wrq", 0);
512 error
= rqp
->sr_lerror
;
513 if (rqp
->sr_flags
& SMBR_MULTIPACKET
) {
515 * If request should stay in the list, then reinsert it
516 * at the end of queue so other waiters have chance to concur
519 TAILQ_REMOVE(&iod
->iod_rqlist
, rqp
, sr_link
);
520 TAILQ_INSERT_TAIL(&iod
->iod_rqlist
, rqp
, sr_link
);
521 SMB_IOD_RQUNLOCK(iod
);
523 smb_iod_removerq(rqp
);
/*
 * Walk the request list: transmit unsent requests (holding SMBR_XLOCK
 * across the unlocked send, waking XLOCKWANT waiters afterward) and
 * time out sent requests whose age exceeds twice the transport timeout,
 * completing them with ETIMEDOUT. An ENOTCONN result from a send
 * presumably triggers smb_iod_dead — the line after the final check is
 * missing from this extraction. Case labels, getnanotime and lock
 * re-acquisition lines are also missing.
 */
529 smb_iod_sendall(struct smbiod
*iod
)
531 struct smb_vc
*vcp
= iod
->iod_vc
;
533 struct timespec ts
, tstimeout
;
538 * Loop through the list of requests and send them if possible
541 TAILQ_FOREACH(rqp
, &iod
->iod_rqlist
, sr_link
) {
542 switch (rqp
->sr_state
) {
/* Hold XLOCK so removerq cannot unlink while we send unlocked. */
544 rqp
->sr_flags
|= SMBR_XLOCK
;
545 SMB_IOD_RQUNLOCK(iod
);
546 herror
= smb_iod_sendrq(iod
, rqp
);
548 rqp
->sr_flags
&= ~SMBR_XLOCK
;
549 if (rqp
->sr_flags
& SMBR_XLOCKWANT
) {
550 rqp
->sr_flags
&= ~SMBR_XLOCKWANT
;
/* Timeout check: expire requests older than 2x transport timeout. */
555 SMB_TRAN_GETPARAM(vcp
, SMBTP_TIMEOUT
, &tstimeout
);
556 timespecadd(&tstimeout
, &tstimeout
);
558 timespecsub(&ts
, &tstimeout
);
559 if (timespeccmp(&ts
, &rqp
->sr_timesent
, >)) {
560 smb_iod_rqprocessed(rqp
, ETIMEDOUT
);
569 SMB_IOD_RQUNLOCK(iod
);
570 if (herror
== ENOTCONN
)
576 * "main" function for smbiod daemon
/*
 * One iteration of the iod daemon loop: dequeue and dispatch pending
 * events (connect / disconnect / tree-connect / shutdown / new
 * request), waking sync requesters or freeing async events, then send
 * an SMB echo if the VC has been idle past the ping timeout, and
 * finally pump sendall/recvall. NOTE(review): the event-loop header,
 * break statements, wakeup of sync waiters and getnanotime lines are
 * missing from this extraction.
 */
579 smb_iod_main(struct smbiod
*iod
)
581 /* struct smb_vc *vcp = iod->iod_vc;*/
582 struct smbiod_event
*evp
;
583 /* struct timespec tsnow;*/
590 * Check all interesting events
594 evp
= STAILQ_FIRST(&iod
->iod_evlist
);
596 SMB_IOD_EVUNLOCK(iod
);
599 STAILQ_REMOVE_HEAD(&iod
->iod_evlist
, ev_link
);
600 evp
->ev_type
|= SMBIOD_EV_PROCESSING
;
601 SMB_IOD_EVUNLOCK(iod
);
602 switch (evp
->ev_type
& SMBIOD_EV_MASK
) {
603 case SMBIOD_EV_CONNECT
:
604 iod
->iod_state
= SMBIOD_ST_RECONNECT
;
605 evp
->ev_error
= smb_iod_connect(iod
);
607 case SMBIOD_EV_DISCONNECT
:
608 evp
->ev_error
= smb_iod_disconnect(iod
);
610 case SMBIOD_EV_TREECONNECT
:
611 evp
->ev_error
= smb_iod_treeconnect(iod
, evp
->ev_ident
);
613 case SMBIOD_EV_SHUTDOWN
:
614 iod
->iod_flags
|= SMBIOD_SHUTDOWN
;
616 case SMBIOD_EV_NEWRQ
:
/* Sync events are freed by the requester; async ones freed here. */
619 if (evp
->ev_type
& SMBIOD_EV_SYNC
) {
622 SMB_IOD_EVUNLOCK(iod
);
624 kfree(evp
, M_SMBIOD
);
/* Keep-alive: echo the server if idle longer than the ping timeout. */
627 if (iod
->iod_state
== SMBIOD_ST_VCACTIVE
) {
629 timespecsub(&tsnow
, &iod
->iod_pingtimo
);
630 if (timespeccmp(&tsnow
, &iod
->iod_lastrqsent
, >)) {
631 smb_smb_echo(vcp
, &iod
->iod_scred
);
635 smb_iod_sendall(iod
);
636 smb_iod_recvall(iod
);
/* Map to DragonFly's kthread API names. */
640 #define kthread_create_compat kthread_create2
641 #define kthread_exit_compat kthread_exit2
/*
 * Kernel-thread entry point: set up the iod's credentials, then loop
 * running smb_iod_main and sleeping on iod_flags (for iod_sleeptimo
 * ticks, or until woken) until SMBIOD_SHUTDOWN is set, then exit.
 * The smb_iod_main call inside the loop is missing from this
 * extraction.
 */
644 smb_iod_thread(void *arg
)
646 struct smbiod
*iod
= arg
;
648 smb_makescred(&iod
->iod_scred
, iod
->iod_td
, NULL
);
649 while ((iod
->iod_flags
& SMBIOD_SHUTDOWN
) == 0) {
651 SMBIODEBUG("going to sleep for %d ticks\n", iod
->iod_sleeptimo
);
652 if (iod
->iod_flags
& SMBIOD_SHUTDOWN
)
654 tsleep(&iod
->iod_flags
, 0, "90idle", iod
->iod_sleeptimo
);
656 kthread_exit_compat();
/*
 * Allocate and initialize an iod for a virtual circuit, then spawn its
 * kernel thread ("smbiod%d"). On thread-creation failure the iod is
 * freed and the error logged. On success iod_td is pointed at the new
 * process's only LWP's thread. The vcp<->iod back-pointer assignments
 * and the return statements are missing from this extraction.
 */
660 smb_iod_create(struct smb_vc
*vcp
)
663 struct proc
*newp
= NULL
;
666 iod
= smb_zmalloc(sizeof(*iod
), M_SMBIOD
, M_WAITOK
);
667 iod
->iod_id
= smb_iod_next
++;
668 iod
->iod_state
= SMBIOD_ST_NOTCONN
;
/* Timers: periodic wakeup interval and keep-alive ping threshold. */
670 iod
->iod_sleeptimo
= hz
* SMBIOD_SLEEP_TIMO
;
671 iod
->iod_pingtimo
.tv_sec
= SMBIOD_PING_TIMO
;
672 getnanotime(&iod
->iod_lastrqsent
);
/* Initialize the request and event queues and their locks. */
674 smb_sl_init(&iod
->iod_rqlock
, "90rql");
675 TAILQ_INIT(&iod
->iod_rqlist
);
676 smb_sl_init(&iod
->iod_evlock
, "90evl");
677 STAILQ_INIT(&iod
->iod_evlist
);
678 error
= kthread_create_compat(smb_iod_thread
, iod
, &newp
,
679 RFNOWAIT
, "smbiod%d", iod
->iod_id
);
681 SMBERROR("can't start smbiod: %d", error
);
682 kfree(iod
, M_SMBIOD
);
686 iod
->iod_td
= ONLY_LWP_IN_PROC(newp
)->lwp_thread
;
/*
 * Tear down an iod: synchronously ask its thread to shut down, then
 * destroy both locks and free the structure. The sync request
 * guarantees the thread has observed SMBIOD_SHUTDOWN before the
 * memory is released.
 */
691 smb_iod_destroy(struct smbiod
*iod
)
693 smb_iod_request(iod
, SMBIOD_EV_SHUTDOWN
| SMBIOD_EV_SYNC
, NULL
);
694 smb_sl_destroy(&iod
->iod_rqlock
);
695 smb_sl_destroy(&iod
->iod_evlock
);
696 kfree(iod
, M_SMBIOD
);