1 /* $NetBSD: puffs_msgif.c,v 1.87 2011/07/03 08:57:43 mrg Exp $ */
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/param.h>
33 #include <sys/kthread.h>
35 #include <sys/malloc.h>
36 #include <sys/objcache.h>
37 #include <sys/mount.h>
38 #include <sys/namei.h>
40 #include <sys/signal2.h>
41 #include <sys/vnode.h>
42 #include <machine/inttypes.h>
44 #include <dev/misc/putter/putter_sys.h>
45 #include <vfs/puffs/puffs_msgif.h>
46 #include <vfs/puffs/puffs_sys.h>
49 * waitq data structures
53 * While a request is going to userspace, park the caller within the
54 * kernel. This is the kernel counterpart of "struct puffs_req".
/*
 * In-kernel "park" for a request travelling to/from the userspace file
 * server: the kernel counterpart of "struct puffs_req".  The caller is
 * parked on park_cv while the request is out.
 * NOTE(review): this extraction drops some original lines (several
 * fields used elsewhere, e.g. park_flags/park_refcount/park_cv/park_mtx,
 * are not visible here); code left byte-identical.
 */
56 struct puffs_msgpark
{
57 struct puffs_req
*park_preq
; /* req followed by buf */
59 size_t park_copylen
; /* userspace copylength */
60 size_t park_maxlen
; /* max size in comeback */
62 struct puffs_req
*park_creq
; /* non-compat preq */
63 size_t park_creqlen
; /* non-compat preq len */
65 parkdone_fn park_done
; /* "biodone" a'la puffs */
74 TAILQ_ENTRY(puffs_msgpark
) park_entries
;
/* park_flags bits (see uses throughout this file) */
76 #define PARKFLAG_WAITERGONE 0x01
77 #define PARKFLAG_DONE 0x02
78 #define PARKFLAG_ONQUEUE1 0x04
79 #define PARKFLAG_ONQUEUE2 0x08
80 #define PARKFLAG_CALL 0x10
81 #define PARKFLAG_WANTREPLY 0x20
82 #define PARKFLAG_HASERROR 0x40
/* object cache backing puffs_msgpark allocations (see puffs_msgif_init) */
84 static struct objcache
*parkpc
;
/*
 * objcache constructor for a puffs_msgpark: initializes the park's
 * lock and condition variable.  privdata/flags are unused here.
 * NOTE(review): extraction is missing lines (return type, braces,
 * return statement not visible); code left byte-identical.
 */
90 makepark(void *obj
, void *privdata
, int flags
)
92 struct puffs_msgpark
*park
= obj
;
94 lockinit(&park
->park_mtx
, "puffs park_mtx", 0, 0);
95 cv_init(&park
->park_cv
, "puffsrpl");
/*
 * objcache destructor for a puffs_msgpark: tears down the condition
 * variable and lock set up by makepark().
 * NOTE(review): extraction missing lines; code left byte-identical.
 */
101 nukepark(void *obj
, void *privdata
)
103 struct puffs_msgpark
*park
= obj
;
105 cv_destroy(&park
->park_cv
);
106 lockuninit(&park
->park_mtx
);
/*
 * Module init: create the malloc-backed object cache for park
 * structures, with makepark/nukepark as ctor/dtor.
 * NOTE(review): extraction missing lines; code left byte-identical.
 */
110 puffs_msgif_init(void)
113 parkpc
= objcache_create_mbacked(M_PUFFS
, sizeof(struct puffs_msgpark
),
114 0, 0, makepark
, nukepark
, NULL
);
/* Module teardown: destroy the park object cache. */
118 puffs_msgif_destroy(void)
121 objcache_destroy(parkpc
);
/*
 * Allocate a park from the object cache and reset its per-use state:
 * refcount 1, no request buffers, and WANTREPLY set by default.
 * waitok selects M_WAITOK vs M_NOWAIT; NOTE(review): the failure path
 * for M_NOWAIT is not visible in this extraction -- presumably returns
 * NULL; verify against upstream.  Code left byte-identical.
 */
124 static struct puffs_msgpark
*
125 puffs_msgpark_alloc(int waitok
)
127 struct puffs_msgpark
*park
;
129 park
= objcache_get(parkpc
, waitok
? M_WAITOK
: M_NOWAIT
);
133 park
->park_refcount
= 1;
134 park
->park_preq
= park
->park_creq
= NULL
;
135 park
->park_flags
= PARKFLAG_WANTREPLY
;
/*
 * Take an additional reference on a park.  Caller must already hold
 * park_mtx exclusively (asserted).
 */
145 puffs_msgpark_reference(struct puffs_msgpark
*park
)
148 KKASSERT(lockstatus(&park
->park_mtx
, curthread
) == LK_EXCLUSIVE
);
149 park
->park_refcount
++;
153 * Release reference to park structure.
/*
 * Drop "howmany" references (caller holds park_mtx, which is released
 * here).  When the count reaches zero the request buffers and the park
 * itself are freed back to the object cache.
 * NOTE(review): the "if (refcnt == 0)" guard around the frees is not
 * visible in this extraction (lines missing); code left byte-identical.
 */
156 puffs_msgpark_release1(struct puffs_msgpark
*park
, int howmany
)
158 struct puffs_req
*preq
= park
->park_preq
;
159 struct puffs_req
*creq
= park
->park_creq
;
162 KKASSERT(lockstatus(&park
->park_mtx
, curthread
) == LK_EXCLUSIVE
);
163 refcnt
= park
->park_refcount
-= howmany
;
164 lockmgr(&park
->park_mtx
, LK_RELEASE
);
166 KKASSERT(refcnt
>= 0);
170 kfree(preq
, M_PUFFS
);
173 kfree(creq
, M_PUFFS
);
175 objcache_put(parkpc
, park
);
/* Common case: drop exactly one reference. */
182 #define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)
/*
 * Debug helper: dump one park's identity, lengths, callback, flags and
 * refcount via DPRINTF_VERBOSE.  Compiled under PUFFSDEBUG (see the
 * #endif after parkqdump below).
 */
186 parkdump(struct puffs_msgpark
*park
)
189 DPRINTF_VERBOSE(("park %p, preq %p, id %" PRIu64
"\n"
190 "\tcopy %zu, max %zu - done: %p/%p\n"
191 "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
192 park
, park
->park_preq
, park
->park_preq
->preq_id
,
193 park
->park_copylen
, park
->park_maxlen
,
194 park
->park_done
, park
->park_donearg
,
195 park
->park_flags
, park
->park_refcount
,
196 &park
->park_cv
, &park
->park_mtx
));
/*
 * Debug helper: walk a park waitqueue, optionally (dumpall) dumping
 * each entry, then report the total count.
 * NOTE(review): loop body and "total" accounting are partially missing
 * in this extraction; code left byte-identical.
 */
200 parkqdump(struct puffs_wq
*q
, int dumpall
)
202 struct puffs_msgpark
*park
;
205 TAILQ_FOREACH(park
, q
, park_entries
) {
210 DPRINTF_VERBOSE(("puffs waitqueue at %p dumped, %d total\n", q
, total
));
213 #endif /* PUFFSDEBUG */
216 * A word about locking in the park structures: the lock protects the
217 * fields of the *park* structure (not preq) and acts as an interlock
218 * in cv operations. The lock is always internal to this module and
219 * callers do not need to worry about it.
/*
 * Allocate message memory of "len" bytes plus a park to carry it.
 * On success *ppark/*mem receive the park and the zeroed buffer;
 * copylen and maxlen are both initialized to len.  The KKASSERTs show
 * failure is only tolerated in the cansleep==0 (M_NOWAIT) case.
 * NOTE(review): error-unwind paths and the return are not visible in
 * this extraction; code left byte-identical.
 */
223 puffs_msgmem_alloc(size_t len
, struct puffs_msgpark
**ppark
, void **mem
,
226 struct puffs_msgpark
*park
;
229 m
= kmalloc(len
, M_PUFFS
, M_ZERO
| (cansleep
? M_WAITOK
: M_NOWAIT
));
231 KKASSERT(cansleep
== 0);
235 park
= puffs_msgpark_alloc(cansleep
);
237 KKASSERT(cansleep
== 0);
243 park
->park_maxlen
= park
->park_copylen
= len
;
/*
 * Release message memory: take the park lock and drop one reference
 * (puffs_msgpark_release releases the lock).
 */
252 puffs_msgmem_release(struct puffs_msgpark
*park
)
258 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
259 puffs_msgpark_release(park
);
/*
 * Mark a message fire-and-forget: clear WANTREPLY so no response is
 * awaited.  Incompatible with PARKFLAG_CALL (asserted).
 */
263 puffs_msg_setfaf(struct puffs_msgpark
*park
)
266 KKASSERT((park
->park_flags
& PARKFLAG_CALL
) == 0);
267 park
->park_flags
&= ~PARKFLAG_WANTREPLY
;
/*
 * Shrink the userspace copy length by "delta" bytes relative to the
 * allocated maximum.  delta must be strictly less than maxlen.
 */
271 puffs_msg_setdelta(struct puffs_msgpark
*park
, size_t delta
)
274 KKASSERT(delta
< park
->park_maxlen
); /* "<=" wouldn't make sense */
275 park
->park_copylen
= park
->park_maxlen
- delta
;
/*
 * Fill in the request header: operation class, operation type and the
 * node cookie ("ck" -- parameter declaration not visible in this
 * extraction, presumably the trailing argument).
 */
279 puffs_msg_setinfo(struct puffs_msgpark
*park
, int class, int type
,
283 park
->park_preq
->preq_opclass
= PUFFSOP_OPCLASS(class);
284 park
->park_preq
->preq_optype
= type
;
285 park
->park_preq
->preq_cookie
= ck
;
/*
 * Attach a completion callback ("biodone"-style): instead of a waiter
 * sleeping on the park, donefn(pmp, preq, donearg) will be invoked when
 * the response arrives.  Requires WANTREPLY (asserted).
 */
289 puffs_msg_setcall(struct puffs_msgpark
*park
, parkdone_fn donefn
, void *donearg
)
292 KKASSERT(park
->park_flags
& PARKFLAG_WANTREPLY
);
293 park
->park_done
= donefn
;
294 park
->park_donearg
= donearg
;
295 park
->park_flags
|= PARKFLAG_CALL
;
299 * kernel-user-kernel waitqueues
/*
 * Return the next per-mount message id, serialized by pmp_lock.
 * NOTE(review): declaration of "rv" and the return statement are not
 * visible in this extraction; code left byte-identical.
 */
303 puffs_getmsgid(struct puffs_mount
*pmp
)
307 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
308 rv
= pmp
->pmp_nextmsgid
++;
309 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
315 * A word about reference counting of parks. A reference must be taken
316 * when accessing a park and additionally when it is on a queue. So
317 * when taking it off a queue and releasing the access reference, the
318 * reference count is generally decremented by 2.
/*
 * Queue a request for transport to the userspace file server:
 * stamp the preq with buffer length, message id and caller pid/tid,
 * handle pending-signal special cases (convert INACTIVE/RECLAIM to
 * FAF, otherwise flag EINTR), bail with ENXIO if the mount is not
 * RUNNING, then insert on pmp_msg_touser, take the queue reference,
 * wake waiters and notify putter.
 * NOTE(review): several original lines are missing from this
 * extraction (locals, braces, early returns); code left byte-identical.
 */
322 puffs_msg_enqueue(struct puffs_mount
*pmp
, struct puffs_msgpark
*park
)
324 struct thread
*td
= curthread
;
325 struct puffs_req
*preq
;
329 * Some clients reuse a park, so reset some flags. We might
330 * want to provide a caller-side interface for this and add
331 * a few more invariant checks here, but this will do for now.
333 KKASSERT(pmp
!= NULL
&& park
!= NULL
);
334 park
->park_flags
&= ~(PARKFLAG_DONE
| PARKFLAG_HASERROR
);
335 KKASSERT((park
->park_flags
& PARKFLAG_WAITERGONE
) == 0);
337 preq
= park
->park_preq
;
339 preq
->preq_buflen
= park
->park_maxlen
;
340 KKASSERT(preq
->preq_id
== 0
341 || (preq
->preq_opclass
& PUFFSOPFLAG_ISRESPONSE
));
343 if ((park
->park_flags
& PARKFLAG_WANTREPLY
) == 0)
344 preq
->preq_opclass
|= PUFFSOPFLAG_FAF
;
346 preq
->preq_id
= puffs_getmsgid(pmp
);
348 /* fill in caller information */
349 if (td
->td_proc
== NULL
|| td
->td_lwp
== NULL
) {
350 DPRINTF_VERBOSE(("puffs_msg_enqueue: no process\n"));
355 preq
->preq_pid
= td
->td_proc
->p_pid
;
356 preq
->preq_lid
= td
->td_lwp
->lwp_tid
;
359 * To support cv_sig, yet another movie: check if there are signals
360 * pending and we are issueing a non-FAF. If so, return an error
361 * directly UNLESS we are issueing INACTIVE/RECLAIM. In that case,
362 * convert it to a FAF, fire off to the file server and return
363 * an error. Yes, this is bordering disgusting. Barfbags are on me.
365 ss
= lwp_sigpend(td
->td_lwp
);
366 SIGSETNAND(ss
, td
->td_lwp
->lwp_sigmask
);
367 if (__predict_false((park
->park_flags
& PARKFLAG_WANTREPLY
)
368 && (park
->park_flags
& PARKFLAG_CALL
) == 0
369 && SIGNOTEMPTY(ss
))) {
372 * see the comment about signals in puffs_msg_wait.
374 if (SIGISMEMBER(ss
, SIGINT
) ||
375 SIGISMEMBER(ss
, SIGTERM
) ||
376 SIGISMEMBER(ss
, SIGKILL
) ||
377 SIGISMEMBER(ss
, SIGHUP
) ||
378 SIGISMEMBER(ss
, SIGQUIT
)) {
379 park
->park_flags
|= PARKFLAG_HASERROR
;
380 preq
->preq_rv
= EINTR
;
381 if (PUFFSOP_OPCLASS(preq
->preq_opclass
) == PUFFSOP_VN
382 && (preq
->preq_optype
== PUFFS_VN_INACTIVE
383 || preq
->preq_optype
== PUFFS_VN_RECLAIM
)) {
384 park
->park_preq
->preq_opclass
|=
386 park
->park_flags
&= ~PARKFLAG_WANTREPLY
;
387 DPRINTF_VERBOSE(("puffs_msg_enqueue: "
388 "converted to FAF %p\n", park
));
396 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
397 if (pmp
->pmp_status
!= PUFFSTAT_RUNNING
) {
398 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
399 park
->park_flags
|= PARKFLAG_HASERROR
;
400 preq
->preq_rv
= ENXIO
;
405 parkqdump(&pmp
->pmp_msg_touser
, puffsdebug
> 1);
406 parkqdump(&pmp
->pmp_msg_replywait
, puffsdebug
> 1);
410 * Note: we don't need to lock park since we have the only
411 * reference to it at this point.
413 TAILQ_INSERT_TAIL(&pmp
->pmp_msg_touser
, park
, park_entries
);
414 park
->park_flags
|= PARKFLAG_ONQUEUE1
;
415 pmp
->pmp_msg_touser_count
++;
/* queue holds its own reference (see the refcounting comment above) */
416 park
->park_refcount
++;
417 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
419 cv_broadcast(&pmp
->pmp_msg_waiter_cv
);
420 putter_notify(pmp
->pmp_pi
);
422 DPRINTF_VERBOSE(("touser: req %" PRIu64
", preq: %p, park: %p, "
423 "c/t: 0x%x/0x%x, f: 0x%x\n", preq
->preq_id
, preq
, park
,
424 preq
->preq_opclass
, preq
->preq_optype
, park
->park_flags
));
/*
 * Wait for the file server's response to a queued message.  Blocks all
 * but the "important" signals (same set as NFS intr mounts), takes a
 * mount reference, then sleeps on park_cv unless the response or an
 * error already arrived, or the message needs no reply / uses a
 * callback.  If the sleep is interrupted, the waiter is marked GONE
 * and the park is removed from queue1 if the server never saw it;
 * otherwise the eventual reply is ignored.  Restores the signal mask
 * before returning.
 * NOTE(review): locals (ss/oss/rv/error), braces and several return
 * paths are missing from this extraction; code left byte-identical.
 */
428 puffs_msg_wait(struct puffs_mount
*pmp
, struct puffs_msgpark
*park
)
430 struct puffs_req
*preq
= park
->park_preq
; /* XXX: hmmm */
432 struct lwp
*l
= curthread
->td_lwp
;
433 struct proc
*p
= curthread
->td_proc
;
440 KKASSERT(pmp
!= NULL
&& park
!= NULL
);
443 * block unimportant signals.
445 * The set of "important" signals here was chosen to be same as
446 * nfs interruptible mount.
450 SIGDELSET(ss
, SIGINT
);
451 SIGDELSET(ss
, SIGTERM
);
452 SIGDELSET(ss
, SIGKILL
);
453 SIGDELSET(ss
, SIGHUP
);
454 SIGDELSET(ss
, SIGQUIT
);
455 lockmgr(p
->p_lock
, LK_EXCLUSIVE
);
456 sigprocmask1(l
, SIG_BLOCK
, &ss
, &oss
);
457 lockmgr(p
->p_lock
, LK_RELEASE
);
460 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
461 puffs_mp_reference(pmp
);
462 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
464 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
465 /* did the response beat us to the wait? */
466 if (__predict_false((park
->park_flags
& PARKFLAG_DONE
)
467 || (park
->park_flags
& PARKFLAG_HASERROR
))) {
468 rv
= park
->park_preq
->preq_rv
;
469 lockmgr(&park
->park_mtx
, LK_RELEASE
);
/* FAF or callback-style message: nothing to sleep for here */
473 if ((park
->park_flags
& PARKFLAG_WANTREPLY
) == 0
474 || (park
->park_flags
& PARKFLAG_CALL
)) {
475 lockmgr(&park
->park_mtx
, LK_RELEASE
);
480 error
= cv_wait_sig(&park
->park_cv
, &park
->park_mtx
);
481 DPRINTF_VERBOSE(("puffs_touser: waiter for %p woke up with %d\n",
484 park
->park_flags
|= PARKFLAG_WAITERGONE
;
485 if (park
->park_flags
& PARKFLAG_DONE
) {
487 lockmgr(&park
->park_mtx
, LK_RELEASE
);
490 * ok, we marked it as going away, but
491 * still need to do queue ops. take locks
494 * We don't want to release our reference
495 * if it's on replywait queue to avoid error
496 * to file server. putop() code will DTRT.
498 lockmgr(&park
->park_mtx
, LK_RELEASE
);
499 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
500 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
503 * Still on queue1? We can safely remove it
504 * without any consequences since the file
505 * server hasn't seen it. "else" we need to
506 * wait for the response and just ignore it
507 * to avoid signalling an incorrect error to
510 if (park
->park_flags
& PARKFLAG_ONQUEUE1
) {
511 TAILQ_REMOVE(&pmp
->pmp_msg_touser
,
513 puffs_msgpark_release(park
);
514 pmp
->pmp_msg_touser_count
--;
515 park
->park_flags
&= ~PARKFLAG_ONQUEUE1
;
517 lockmgr(&park
->park_mtx
, LK_RELEASE
);
519 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
525 lockmgr(&park
->park_mtx
, LK_RELEASE
);
529 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
530 puffs_mp_release(pmp
);
531 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
/* restore the caller's original signal mask */
534 lockmgr(p
->p_lock
, LK_EXCLUSIVE
);
535 sigprocmask1(l
, SIG_SETMASK
, &oss
, NULL
);
536 lockmgr(p
->p_lock
, LK_RELEASE
);
543 * XXX: this suuuucks. Hopefully I'll get rid of this lossage once
544 * the whole setback-nonsense gets fixed.
/*
 * Like puffs_msg_wait(), but afterwards applies any "setback" flags
 * the server requested to up to two puffs nodes (force-inactive /
 * no-refs).  NOTE(review): extraction missing lines (e.g. the likely
 * guard around the setback processing and the return); code left
 * byte-identical.
 */
547 puffs_msg_wait2(struct puffs_mount
*pmp
, struct puffs_msgpark
*park
,
548 struct puffs_node
*pn1
, struct puffs_node
*pn2
)
550 struct puffs_req
*preq
;
553 rv
= puffs_msg_wait(pmp
, park
);
555 preq
= park
->park_preq
;
556 if (pn1
&& preq
->preq_setbacks
& PUFFS_SETBACK_INACT_N1
)
557 pn1
->pn_stat
|= PNODE_DOINACT
;
558 if (pn2
&& preq
->preq_setbacks
& PUFFS_SETBACK_INACT_N2
)
559 pn2
->pn_stat
|= PNODE_DOINACT
;
561 if (pn1
&& preq
->preq_setbacks
& PUFFS_SETBACK_NOREF_N1
)
562 pn1
->pn_stat
|= PNODE_NOREFS
;
563 if (pn2
&& preq
->preq_setbacks
& PUFFS_SETBACK_NOREF_N2
)
564 pn2
->pn_stat
|= PNODE_NOREFS
;
571 * XXX: lazy bum. please, for the love of foie gras, fix me.
572 * This should *NOT* depend on setfaf. Also "memcpy" could
573 * be done more nicely.
/*
 * Send an error/response back to the file server for "origpreq":
 * allocate a fresh FAF park, copy the original request header in,
 * mark it as a response, enqueue it and release our reference.
 * NOTE(review): the line setting preq_rv = rv is not visible in this
 * extraction; code left byte-identical.
 */
576 puffs_msg_sendresp(struct puffs_mount
*pmp
, struct puffs_req
*origpreq
, int rv
)
578 struct puffs_msgpark
*park
;
579 struct puffs_req
*preq
;
581 puffs_msgmem_alloc(sizeof(struct puffs_req
), &park
, (void *)&preq
, 1);
582 puffs_msg_setfaf(park
); /* XXXXXX: avoids reqid override */
584 memcpy(preq
, origpreq
, sizeof(struct puffs_req
));
586 preq
->preq_opclass
|= PUFFSOPFLAG_ISRESPONSE
;
588 puffs_msg_enqueue(pmp
, park
);
589 puffs_msgmem_release(park
);
593 * Get next request in the outgoing queue. "maxsize" controls the
594 * size the caller can accommodate and "nonblock" signals if this
595 * should block while waiting for input. Handles all locking internally.
/*
 * putter callback: hand the next request on pmp_msg_touser to the
 * transport.  Skips parks whose waiter has gone, rejects requests that
 * don't fit in maxsize, and returns the park (still locked until
 * releaseout) plus pointers to the frame data and its length.
 * NOTE(review): loop structure, error returns and several braces are
 * missing from this extraction; code left byte-identical.
 */
598 puffs_msgif_getout(void *this, size_t maxsize
, int nonblock
,
599 uint8_t **data
, size_t *dlen
, void **parkptr
)
601 struct puffs_mount
*pmp
= this;
602 struct puffs_msgpark
*park
= NULL
;
603 struct puffs_req
*preq
= NULL
;
607 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
608 puffs_mp_reference(pmp
);
611 if (pmp
->pmp_status
!= PUFFSTAT_RUNNING
) {
616 /* need platinum yendorian express card? */
617 if (TAILQ_EMPTY(&pmp
->pmp_msg_touser
)) {
618 DPRINTF_VERBOSE(("puffs_getout: no outgoing op, "));
620 DPRINTF_VERBOSE(("returning EWOULDBLOCK\n"));
624 DPRINTF_VERBOSE(("waiting ...\n"));
626 error
= cv_wait_sig(&pmp
->pmp_msg_waiter_cv
,
634 park
= TAILQ_FIRST(&pmp
->pmp_msg_touser
);
638 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
639 puffs_msgpark_reference(park
);
641 DPRINTF_VERBOSE(("puffs_getout: found park at %p, ", park
));
643 /* If it's a goner, don't process any furher */
644 if (park
->park_flags
& PARKFLAG_WAITERGONE
) {
645 DPRINTF_VERBOSE(("waitergone!\n"));
646 puffs_msgpark_release(park
);
649 preq
= park
->park_preq
;
654 * XXX: this check is not valid for now, we don't know
655 * the size of the caller's input buffer. i.e. this
656 * will most likely go away
658 if (maxsize
< preq
->preq_frhdr
.pfr_len
) {
659 DPRINTF(("buffer too small\n"));
660 puffs_msgpark_release(park
);
666 DPRINTF_VERBOSE(("returning\n"));
669 * Ok, we found what we came for. Release it from the
670 * outgoing queue but do not unlock. We will unlock
671 * only after we "releaseout" it to avoid complications:
672 * otherwise it is (theoretically) possible for userland
673 * to race us into "put" before we have a change to put
674 * this baby on the receiving queue.
676 TAILQ_REMOVE(&pmp
->pmp_msg_touser
, park
, park_entries
);
677 KKASSERT(park
->park_flags
& PARKFLAG_ONQUEUE1
);
678 park
->park_flags
&= ~PARKFLAG_ONQUEUE1
;
679 lockmgr(&park
->park_mtx
, LK_RELEASE
);
681 pmp
->pmp_msg_touser_count
--;
682 KKASSERT(pmp
->pmp_msg_touser_count
>= 0);
686 puffs_mp_release(pmp
);
687 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
690 *data
= (uint8_t *)preq
;
691 preq
->preq_pth
.pth_framelen
= park
->park_copylen
;
692 *dlen
= preq
->preq_pth
.pth_framelen
;
700 * Release outgoing structure. Now, depending on the success of the
701 * outgoing send, it is either going onto the result waiting queue
702 * or the death chamber.
/*
 * putter callback after the send attempt: if a reply is wanted and the
 * send succeeded, move the park to pmp_msg_replywait (keeping the
 * queue reference); on a send error, record the status, mark DONE and
 * wake the waiter.  No-reply parks drop both references.
 * NOTE(review): branch structure (the status==0 test) is partially
 * missing from this extraction; code left byte-identical.
 */
705 puffs_msgif_releaseout(void *this, void *parkptr
, int status
)
707 struct puffs_mount
*pmp
= this;
708 struct puffs_msgpark
*park
= parkptr
;
710 DPRINTF_VERBOSE(("puffs_releaseout: returning park %p, errno %d: " ,
712 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
713 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
714 if (park
->park_flags
& PARKFLAG_WANTREPLY
) {
716 DPRINTF_VERBOSE(("enqueue replywait\n"));
717 TAILQ_INSERT_TAIL(&pmp
->pmp_msg_replywait
, park
,
719 park
->park_flags
|= PARKFLAG_ONQUEUE2
;
721 DPRINTF_VERBOSE(("error path!\n"));
722 park
->park_preq
->preq_rv
= status
;
723 park
->park_flags
|= PARKFLAG_DONE
;
724 cv_signal(&park
->park_cv
);
726 puffs_msgpark_release(park
);
728 DPRINTF_VERBOSE(("release\n"));
729 puffs_msgpark_release1(park
, 2);
731 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
/*
 * putter callback: report how many messages are waiting for transport
 * to userspace, under pmp_lock.
 * NOTE(review): "rv" declaration and return are not visible in this
 * extraction; code left byte-identical.
 */
735 puffs_msgif_waitcount(void *this)
737 struct puffs_mount
*pmp
= this;
740 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
741 rv
= pmp
->pmp_msg_touser_count
;
742 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
748 * XXX: locking with this one?
/*
 * Handle a response frame from the file server: find the matching park
 * on pmp_msg_replywait by message id, validate the frame length
 * against park_maxlen (EPROTO otherwise), dequeue it and either copy
 * the reply and invoke the PARKFLAG_CALL callback or signal the
 * sleeping waiter and flag DONE.  Waiter-gone parks are dropped.
 * NOTE(review): braces and the wgone-conditional structure are
 * partially missing from this extraction; code left byte-identical.
 */
751 puffsop_msg(void *this, struct puffs_req
*preq
)
753 struct puffs_mount
*pmp
= this;
754 struct putter_hdr
*pth
= &preq
->preq_pth
;
755 struct puffs_msgpark
*park
;
758 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
761 TAILQ_FOREACH(park
, &pmp
->pmp_msg_replywait
, park_entries
) {
762 if (park
->park_preq
->preq_id
== preq
->preq_id
)
766 DPRINTF_VERBOSE(("puffsop_msg: no request: %" PRIu64
"\n",
768 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
769 return; /* XXX send error */
772 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
773 puffs_msgpark_reference(park
);
774 if (pth
->pth_framelen
> park
->park_maxlen
) {
775 DPRINTF_VERBOSE(("puffsop_msg: invalid buffer length: "
776 "%" PRIu64
" (req %" PRIu64
", \n", pth
->pth_framelen
,
778 park
->park_preq
->preq_rv
= EPROTO
;
779 cv_signal(&park
->park_cv
);
780 puffs_msgpark_release1(park
, 2);
781 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
782 return; /* XXX: error */
784 wgone
= park
->park_flags
& PARKFLAG_WAITERGONE
;
786 KKASSERT(park
->park_flags
& PARKFLAG_ONQUEUE2
);
787 TAILQ_REMOVE(&pmp
->pmp_msg_replywait
, park
, park_entries
);
788 park
->park_flags
&= ~PARKFLAG_ONQUEUE2
;
789 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
792 DPRINTF_VERBOSE(("puffsop_msg: bad service - waiter gone for "
795 memcpy(park
->park_preq
, preq
, pth
->pth_framelen
);
797 if (park
->park_flags
& PARKFLAG_CALL
) {
798 DPRINTF_VERBOSE(("puffsop_msg: call for %p, arg %p\n",
799 park
->park_preq
, park
->park_donearg
));
800 park
->park_done(pmp
, preq
, park
->park_donearg
);
805 DPRINTF_VERBOSE(("puffs_putop: flagging done for "
807 cv_signal(&park
->park_cv
);
810 park
->park_flags
|= PARKFLAG_DONE
;
811 puffs_msgpark_release1(park
, 2);
/*
 * Handle a server-initiated cache-flush request (run from the sop
 * thread): switch on pf_op -- purge the whole namecache, invalidate
 * per-node/per-dir name entries, or invalidate/flush a page range of a
 * regular file via VOP_PUTPAGES.  Replies to the server with the
 * result at the end.
 * NOTE(review): the switch header, several case bodies, local
 * declarations (vp, rv, flags, offlo/offhi) and error paths are
 * missing from this extraction; code left byte-identical.
 */
815 puffsop_flush(struct puffs_mount
*pmp
, struct puffs_flush
*pf
)
824 KKASSERT(pf
->pf_req
.preq_pth
.pth_framelen
== sizeof(struct puffs_flush
));
827 if (pf
->pf_op
== PUFFS_INVAL_NAMECACHE_ALL
) {
829 cache_purgevfs(PMPTOMP(pmp
));
837 * Get vnode, don't lock it. Namecache is protected by its own lock
838 * and we have a reference to protect against premature harvesting.
840 * The node we want here might be locked and the op is in
841 * userspace waiting for us to complete ==> deadlock. Another
842 * reason we need to eventually bump locking to userspace, as we
843 * will need to lock the node if we wish to do flushes.
845 rv
= puffs_cookie2vnode(pmp
, pf
->pf_cookie
, 0, &vp
);
847 if (rv
== PUFFS_NOSUCHCOOKIE
)
854 /* not quite ready, yet */
855 case PUFFS_INVAL_NAMECACHE_NODE
:
856 struct componentname
*pf_cn
;
858 /* get comfortab^Wcomponentname */
859 pf_cn
= kmem_alloc(componentname
);
860 memset(pf_cn
, 0, sizeof(struct componentname
));
864 case PUFFS_INVAL_NAMECACHE_DIR
:
865 if (vp
->v_type
!= VDIR
) {
870 /* deadlocks, needs its own kernel thread */
876 case PUFFS_INVAL_PAGECACHE_NODE_RANGE
:
879 case PUFFS_FLUSH_PAGECACHE_NODE_RANGE
:
883 if (pf
->pf_end
> vp
->v_size
|| vp
->v_type
!= VREG
) {
888 offlo
= trunc_page(pf
->pf_start
);
889 offhi
= round_page(pf
->pf_end
);
890 if (offhi
!= 0 && offlo
>= offhi
) {
895 lockmgr(&vp
->v_uobj
.vmobjlock
, LK_EXCLUSIVE
);
896 rv
= VOP_PUTPAGES(vp
, offlo
, offhi
, flags
);
907 puffs_msg_sendresp(pmp
, &pf
->pf_req
, rv
);
/*
 * putter callback: dispatch an incoming frame from the file server.
 * Frames smaller than a puffs_req are rejected with EINVAL.  VN/VFS
 * responses go to puffsop_msg() inline; FLUSH and UNMOUNT requests are
 * copied into a puffs_sopreq and queued for the sop thread (replying
 * ENXIO if that thread is gone); unknown classes get EOPNOTSUPP.
 * NOTE(review): case labels for the VN/VFS classes, break/return
 * statements and block braces are missing from this extraction; code
 * left byte-identical.
 */
911 puffs_msgif_dispatch(void *this, struct putter_hdr
*pth
)
913 struct puffs_mount
*pmp
= this;
914 struct puffs_req
*preq
= (struct puffs_req
*)pth
;
915 struct puffs_sopreq
*psopr
;
917 if (pth
->pth_framelen
< sizeof(struct puffs_req
)) {
918 puffs_msg_sendresp(pmp
, preq
, EINVAL
); /* E2SMALL */
922 switch (PUFFSOP_OPCLASS(preq
->preq_opclass
)) {
925 DPRINTF_VERBOSE(("dispatch: vn/vfs message 0x%x\n",
927 puffsop_msg(pmp
, preq
);
930 case PUFFSOP_FLUSH
: /* process in sop thread */
932 struct puffs_flush
*pf
;
934 DPRINTF(("dispatch: flush 0x%x\n", preq
->preq_optype
));
936 if (preq
->preq_pth
.pth_framelen
!= sizeof(struct puffs_flush
)) {
937 puffs_msg_sendresp(pmp
, preq
, EINVAL
); /* E2SMALL */
940 pf
= (struct puffs_flush
*)preq
;
942 psopr
= kmalloc(sizeof(*psopr
), M_PUFFS
, M_WAITOK
);
943 memcpy(&psopr
->psopr_pf
, pf
, sizeof(*pf
));
944 psopr
->psopr_sopreq
= PUFFS_SOPREQ_FLUSH
;
946 lockmgr(&pmp
->pmp_sopmtx
, LK_EXCLUSIVE
);
947 if (pmp
->pmp_sopthrcount
== 0) {
948 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
949 kfree(psopr
, M_PUFFS
);
950 puffs_msg_sendresp(pmp
, preq
, ENXIO
);
952 TAILQ_INSERT_TAIL(&pmp
->pmp_sopreqs
,
953 psopr
, psopr_entries
);
954 cv_signal(&pmp
->pmp_sopcv
);
955 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
960 case PUFFSOP_UNMOUNT
: /* process in sop thread */
963 DPRINTF(("dispatch: unmount 0x%x\n", preq
->preq_optype
));
965 psopr
= kmalloc(sizeof(*psopr
), M_PUFFS
, M_WAITOK
);
966 psopr
->psopr_preq
= *preq
;
967 psopr
->psopr_sopreq
= PUFFS_SOPREQ_UNMOUNT
;
969 lockmgr(&pmp
->pmp_sopmtx
, LK_EXCLUSIVE
);
970 if (pmp
->pmp_sopthrcount
== 0) {
971 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
972 kfree(psopr
, M_PUFFS
);
973 puffs_msg_sendresp(pmp
, preq
, ENXIO
);
975 TAILQ_INSERT_TAIL(&pmp
->pmp_sopreqs
,
976 psopr
, psopr_entries
);
977 cv_signal(&pmp
->pmp_sopcv
);
978 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
984 DPRINTF(("dispatch: invalid class 0x%x\n", preq
->preq_opclass
));
985 puffs_msg_sendresp(pmp
, preq
, EOPNOTSUPP
);
993 * Work loop for thread processing all ops from server which
994 * cannot safely be handled in caller context. This includes
995 * everything which might need a lock currently "held" by the file
996 * server, i.e. a long-term kernel lock which will be released only
997 * once the file server acknowledges a request
/*
 * Per-mount worker thread: consume puffs_sopreq entries off
 * pmp_sopreqs (sleeping on pmp_sopcv), handling EXIT, FLUSH and
 * UNMOUNT requests; on shutdown, drain remaining entries with ENXIO
 * replies, decrement the thread count, and force-unmount if requested.
 * NOTE(review): break statements, the unmountme assignment and the
 * thread-exit call are missing from this extraction; code left
 * byte-identical.
 */
1000 puffs_sop_thread(void *arg
)
1002 struct puffs_mount
*pmp
= arg
;
1003 struct mount
*mp
= PMPTOMP(pmp
);
1004 struct puffs_sopreq
*psopr
;
1005 boolean_t keeprunning
;
1006 boolean_t unmountme
= FALSE
;
1008 lockmgr(&pmp
->pmp_sopmtx
, LK_EXCLUSIVE
);
1009 for (keeprunning
= TRUE
; keeprunning
; ) {
1010 while ((psopr
= TAILQ_FIRST(&pmp
->pmp_sopreqs
)) == NULL
)
1011 cv_wait(&pmp
->pmp_sopcv
, &pmp
->pmp_sopmtx
);
1012 TAILQ_REMOVE(&pmp
->pmp_sopreqs
, psopr
, psopr_entries
);
1013 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
1015 switch (psopr
->psopr_sopreq
) {
1016 case PUFFS_SOPREQSYS_EXIT
:
1017 keeprunning
= FALSE
;
1019 case PUFFS_SOPREQ_FLUSH
:
1020 puffsop_flush(pmp
, &psopr
->psopr_pf
);
1022 case PUFFS_SOPREQ_UNMOUNT
:
1023 puffs_msg_sendresp(pmp
, &psopr
->psopr_preq
, 0);
1026 keeprunning
= FALSE
;
1031 kfree(psopr
, M_PUFFS
);
1032 lockmgr(&pmp
->pmp_sopmtx
, LK_EXCLUSIVE
);
1036 * Purge remaining ops.
1038 while ((psopr
= TAILQ_FIRST(&pmp
->pmp_sopreqs
)) != NULL
) {
1039 TAILQ_REMOVE(&pmp
->pmp_sopreqs
, psopr
, psopr_entries
);
1040 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
);
1041 puffs_msg_sendresp(pmp
, &psopr
->psopr_preq
, ENXIO
);
1042 kfree(psopr
, M_PUFFS
);
1043 lockmgr(&pmp
->pmp_sopmtx
, LK_EXCLUSIVE
);
1046 pmp
->pmp_sopthrcount
--;
1047 cv_broadcast(&pmp
->pmp_sopcv
);
1048 lockmgr(&pmp
->pmp_sopmtx
, LK_RELEASE
); /* not allowed to access fs after this */
1051 * If unmount was requested, we can now safely do it here, since
1052 * our context is dead from the point-of-view of puffs_unmount()
1053 * and we are just another thread. dounmount() makes internally
1054 * sure that VFS_UNMOUNT() isn't called reentrantly and that it
1055 * is eventually completed.
1058 (void)dounmount(mp
, MNT_FORCE
);
/*
 * putter callback when the file server closes its descriptor: declare
 * the server dead (waking all parked callers via puffs_userdead),
 * synchronize with any in-progress unmount, then force-unmount the
 * file system.
 * NOTE(review): the restart path after the unmounting-wait and the
 * return are missing from this extraction; code left byte-identical.
 */
1065 puffs_msgif_close(void *this)
1067 struct puffs_mount
*pmp
= this;
1068 struct mount
*mp
= PMPTOMP(pmp
);
1070 lockmgr(&pmp
->pmp_lock
, LK_EXCLUSIVE
);
1071 puffs_mp_reference(pmp
);
1074 * Free the waiting callers before proceeding any further.
1075 * The syncer might be jogging around in this file system
1076 * currently. If we allow it to go to the userspace of no
1077 * return while trying to get the syncer lock, well ...
1079 puffs_userdead(pmp
);
1082 * Make sure someone from puffs_unmount() isn't currently in
1083 * userspace. If we don't take this precautionary step,
1084 * they might notice that the mountpoint has disappeared
1085 * from under them once they return. Especially note that we
1086 * cannot simply test for an unmounter before calling
1087 * dounmount(), since it might be possible that that particular
1088 * invocation of unmount was called without MNT_FORCE. Here we
1089 * *must* make sure unmount succeeds. Also, restart is necessary
1090 * since pmp isn't locked. We might end up with PUTTER_DEAD after
1091 * restart and exit from there.
1093 if (pmp
->pmp_unmounting
) {
1094 cv_wait(&pmp
->pmp_unmounting_cv
, &pmp
->pmp_lock
);
1095 puffs_mp_release(pmp
);
1096 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
1097 DPRINTF(("puffs_fop_close: unmount was in progress for pmp %p, "
1102 /* Won't access pmp from here anymore */
1103 puffs_mp_release(pmp
);
1104 lockmgr(&pmp
->pmp_lock
, LK_RELEASE
);
1106 /* Detach from VFS. */
1107 (void)dounmount(mp
, MNT_FORCE
);
1113 * We're dead, kaput, RIP, slightly more than merely pining for the
1114 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
1115 * our maker, ceased to be, etcetc. YASD. It's a dead FS!
1117 * Caller must hold puffs mutex.
1120 puffs_userdead(struct puffs_mount
*pmp
)
1122 struct puffs_msgpark
*park
, *park_next
;
1125 * Mark filesystem status as dying so that operations don't
1126 * attempt to march to userspace any longer.
1128 pmp
->pmp_status
= PUFFSTAT_DYING
;
1130 /* signal waiters on REQUEST TO file server queue */
1131 for (park
= TAILQ_FIRST(&pmp
->pmp_msg_touser
); park
; park
= park_next
) {
1132 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
1133 puffs_msgpark_reference(park
);
1134 park_next
= TAILQ_NEXT(park
, park_entries
);
1136 KKASSERT(park
->park_flags
& PARKFLAG_ONQUEUE1
);
1137 TAILQ_REMOVE(&pmp
->pmp_msg_touser
, park
, park_entries
);
1138 park
->park_flags
&= ~PARKFLAG_ONQUEUE1
;
1139 pmp
->pmp_msg_touser_count
--;
1142 * Even though waiters on QUEUE1 are removed in touser()
1143 * in case of WAITERGONE, it is still possible for us to
1144 * get raced here due to having to retake locks in said
1145 * touser(). In the race case simply "ignore" the item
1146 * on the queue and move on to the next one.
1148 if (park
->park_flags
& PARKFLAG_WAITERGONE
) {
1149 KKASSERT((park
->park_flags
& PARKFLAG_CALL
) == 0);
1150 KKASSERT(park
->park_flags
& PARKFLAG_WANTREPLY
);
1151 puffs_msgpark_release(park
);
1154 park
->park_preq
->preq_rv
= ENXIO
;
1156 if (park
->park_flags
& PARKFLAG_CALL
) {
1157 park
->park_done(pmp
, park
->park_preq
,
1158 park
->park_donearg
);
1159 puffs_msgpark_release1(park
, 2);
1160 } else if ((park
->park_flags
& PARKFLAG_WANTREPLY
)==0) {
1161 puffs_msgpark_release1(park
, 2);
1163 park
->park_preq
->preq_rv
= ENXIO
;
1164 cv_signal(&park
->park_cv
);
1165 puffs_msgpark_release(park
);
1170 /* signal waiters on RESPONSE FROM file server queue */
1171 for (park
=TAILQ_FIRST(&pmp
->pmp_msg_replywait
); park
; park
=park_next
) {
1172 lockmgr(&park
->park_mtx
, LK_EXCLUSIVE
);
1173 puffs_msgpark_reference(park
);
1174 park_next
= TAILQ_NEXT(park
, park_entries
);
1176 KKASSERT(park
->park_flags
& PARKFLAG_ONQUEUE2
);
1177 KKASSERT(park
->park_flags
& PARKFLAG_WANTREPLY
);
1179 TAILQ_REMOVE(&pmp
->pmp_msg_replywait
, park
, park_entries
);
1180 park
->park_flags
&= ~PARKFLAG_ONQUEUE2
;
1182 if (park
->park_flags
& PARKFLAG_WAITERGONE
) {
1183 KKASSERT((park
->park_flags
& PARKFLAG_CALL
) == 0);
1184 puffs_msgpark_release(park
);
1186 park
->park_preq
->preq_rv
= ENXIO
;
1187 if (park
->park_flags
& PARKFLAG_CALL
) {
1188 park
->park_done(pmp
, park
->park_preq
,
1189 park
->park_donearg
);
1190 puffs_msgpark_release1(park
, 2);
1192 cv_signal(&park
->park_cv
);
1193 puffs_msgpark_release(park
);
1198 cv_broadcast(&pmp
->pmp_msg_waiter_cv
);