4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
28 * System call I/F to doors (outside of vnodes I/F) and misc support
31 #include <sys/types.h>
32 #include <sys/systm.h>
34 #include <sys/door_data.h>
36 #include <sys/thread.h>
37 #include <sys/prsystm.h>
38 #include <sys/procfs.h>
39 #include <sys/class.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
50 #include <sys/sobject.h>
51 #include <sys/schedctl.h>
52 #include <sys/callb.h>
53 #include <sys/ucred.h>
56 #include <sys/sysmacros.h>
57 #include <sys/vmsystm.h>
62 #include <vm/seg_vn.h>
63 #include <vm/seg_vn.h>
64 #include <vm/seg_kpm.h>
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
/*
 * The maximum amount of data (in bytes) that will be transferred using
 * an intermediate kernel buffer.  For sizes greater than this we map
 * in the destination pages and perform a 1-copy transfer.
 */
size_t door_max_arg = 16 * 1024;

/*
 * Maximum amount of data that will be transferred in a reply to a
 * door_upcall.  Need to guard against a process returning huge amounts
 * of data and getting the kernel stuck in kmem_alloc.
 */
size_t door_max_upcall_reply = 1024 * 1024;

/*
 * Maximum number of descriptors allowed to be passed in a single
 * door_call or door_return.  We need to allocate kernel memory
 * for all of them at once, so we can't let it scale without limit.
 */
uint_t door_max_desc = 1024;
/*
 * Definition of a door handle, used by other kernel subsystems when
 * calling door functions.  This is really a file structure but we
 * want to hide that fact.
 */
/*
 * NOTE(review): the struct body and its closing brace were lost in
 * extraction -- presumably it wraps the underlying file_t; confirm
 * against the upstream source before relying on this.
 */
struct __door_handle {

/* Convert a door handle to/from its underlying file_t pointer. */
#define	DHTOF(dh) ((file_t *)(dh))
#define	FTODH(fp) ((door_handle_t)(fp))
/* Common entry point for all door system calls; see doorfs() below. */
static int doorfs(long, long, long, long, long, long);

/*
 * NOTE(review): the initializers below are truncated -- member lines
 * and closing braces were lost in extraction.
 */
static struct sysent door_sysent = {
	SE_ARGC | SE_NOUNLOAD,

static struct modlsys modlsys = {
	&mod_syscallops, "doors", &door_sysent
#ifdef _SYSCALL32_IMPL
/*
 * 32-bit entry point; see doorfs32() below.
 * NOTE(review): the return-type line of this prototype and several
 * initializer/brace lines in this section were lost in extraction.
 */
doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
    int32_t arg5, int32_t subcode);

static struct sysent door_sysent32 = {
	SE_ARGC | SE_NOUNLOAD,

static struct modlsys modlsys32 = {
	"32-bit door syscalls",

static struct modlinkage modlinkage = {
#ifdef _SYSCALL32_IMPL

/* Doors pseudo-filesystem state; defined in the door vnode/VFS code. */
extern struct vfs door_vfs;
extern const struct vnodeops door_vnodeops;

/* yes, we want all defaults */
static const struct vfsops door_vfsops;
/*
 * Module initialization fragment (_init): initializes the global
 * door_knob mutex, allocates a device major number for the doors
 * pseudo-device, initializes the (read-only) doors VFS, and installs
 * the syscall module.
 * NOTE(review): the _init() signature, its local declarations, and the
 * error-return lines were lost in extraction.
 */
	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
	if ((major = getudev()) == (major_t)-1)
	doordev = makedevice(major, 0);

	VFS_INIT(&door_vfs, &door_vfsops, NULL);
	door_vfs.vfs_flag = VFS_RDONLY;
	door_vfs.vfs_dev = doordev;
	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);

	return (mod_install(&modlinkage));

/* Report module information to modinfo(8). */
_info(struct modinfo *modinfop)
	return (mod_info(&modlinkage, modinfop));
/* system call functions */
static int door_call(int, void *);
static int door_return(caddr_t, size_t, door_desc_t *, uint_t,
    caddr_t, size_t);
static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
    uint_t), void *data_cookie, uint_t);
static int door_revoke(int);
static int door_info(int, struct door_info *);
static int door_ucred(struct ucred_s *);
static int door_bind(int);
static int door_unbind(void);
static int door_unref(void);
static int door_getparam(int, int, size_t *);
static int door_setparam(int, int, size_t);

#define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
193 * System call wrapper for all door related system calls
196 doorfs(long arg1
, long arg2
, long arg3
, long arg4
, long arg5
, long subcode
)
200 return (door_call(arg1
, (void *)arg2
));
202 door_return_desc_t
*drdp
= (door_return_desc_t
*)arg3
;
205 door_return_desc_t drd
;
206 if (copyin(drdp
, &drd
, sizeof (drd
)))
208 return (door_return((caddr_t
)arg1
, arg2
, drd
.desc_ptr
,
209 drd
.desc_num
, (caddr_t
)arg4
, arg5
));
211 return (door_return((caddr_t
)arg1
, arg2
, NULL
,
212 0, (caddr_t
)arg4
, arg5
));
214 case DOOR_RETURN_OLD
:
216 * In order to support the S10 runtime environment, we
217 * still respond to the old syscall subcode for door_return.
218 * We treat it as having no stack limits. This code should
219 * be removed when such support is no longer needed.
221 return (door_return((caddr_t
)arg1
, arg2
, (door_desc_t
*)arg3
,
222 arg4
, (caddr_t
)arg5
, 0));
224 return (door_create((void (*)())arg1
, (void *)arg2
, arg3
));
226 return (door_revoke(arg1
));
228 return (door_info(arg1
, (struct door_info
*)arg2
));
230 return (door_bind(arg1
));
232 return (door_unbind());
234 return (door_unref());
236 return (door_ucred((struct ucred_s
*)arg1
));
238 return (door_getparam(arg1
, arg2
, (size_t *)arg3
));
240 return (door_setparam(arg1
, arg2
, arg3
));
242 return (set_errno(EINVAL
));
246 #ifdef _SYSCALL32_IMPL
248 * System call wrapper for all door related system calls from 32-bit programs.
249 * Needed at the moment because of the casts - they undo some damage
250 * that truss causes (sign-extending the stack pointer) when truss'ing
251 * a 32-bit program using doors.
254 doorfs32(int32_t arg1
, int32_t arg2
, int32_t arg3
,
255 int32_t arg4
, int32_t arg5
, int32_t subcode
)
259 return (door_call(arg1
, (void *)(uintptr_t)(caddr32_t
)arg2
));
261 door_return_desc32_t
*drdp
=
262 (door_return_desc32_t
*)(uintptr_t)(caddr32_t
)arg3
;
264 door_return_desc32_t drd
;
265 if (copyin(drdp
, &drd
, sizeof (drd
)))
268 (caddr_t
)(uintptr_t)(caddr32_t
)arg1
, arg2
,
269 (door_desc_t
*)(uintptr_t)drd
.desc_ptr
,
270 drd
.desc_num
, (caddr_t
)(uintptr_t)(caddr32_t
)arg4
,
271 (size_t)(uintptr_t)(size32_t
)arg5
));
273 return (door_return((caddr_t
)(uintptr_t)(caddr32_t
)arg1
,
274 arg2
, NULL
, 0, (caddr_t
)(uintptr_t)(caddr32_t
)arg4
,
275 (size_t)(uintptr_t)(size32_t
)arg5
));
277 case DOOR_RETURN_OLD
:
279 * In order to support the S10 runtime environment, we
280 * still respond to the old syscall subcode for door_return.
281 * We treat it as having no stack limits. This code should
282 * be removed when such support is no longer needed.
284 return (door_return((caddr_t
)(uintptr_t)(caddr32_t
)arg1
, arg2
,
285 (door_desc_t
*)(uintptr_t)(caddr32_t
)arg3
, arg4
,
286 (caddr_t
)(uintptr_t)(caddr32_t
)arg5
, 0));
288 return (door_create((void (*)())(uintptr_t)(caddr32_t
)arg1
,
289 (void *)(uintptr_t)(caddr32_t
)arg2
, arg3
));
291 return (door_revoke(arg1
));
293 return (door_info(arg1
,
294 (struct door_info
*)(uintptr_t)(caddr32_t
)arg2
));
296 return (door_bind(arg1
));
298 return (door_unbind());
300 return (door_unref());
303 (struct ucred_s
*)(uintptr_t)(caddr32_t
)arg1
));
305 return (door_getparam(arg1
, arg2
,
306 (size_t *)(uintptr_t)(caddr32_t
)arg3
));
308 return (door_setparam(arg1
, arg2
, (size_t)(size32_t
)arg3
));
311 return (set_errno(EINVAL
));
/* Shuttle synchronization primitives (direct handoff scheduling). */
void shuttle_resume(kthread_t *, kmutex_t *);
void shuttle_swtch(kmutex_t *);
void shuttle_sleep(kthread_t *);

/*
 * Forward declarations of the doors internals.
 * NOTE(review): the continuation line of the door_create_common()
 * prototype (its trailing parameters) was lost in extraction.
 */
static int door_create_common(void (*)(), void *, uint_t, int, int *,
static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_args(kthread_t *, int);
static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
static void door_server_exit(proc_t *, kthread_t *);
static void door_release_server(door_node_t *, kthread_t *);
static kthread_t *door_get_server(door_node_t *);
static door_node_t *door_lookup(int, file_t **);
static int door_translate_in(void);
static int door_translate_out(void);
static void door_fd_rele(door_desc_t *, uint_t, int);
static void door_list_insert(door_node_t *);
static void door_info_common(door_node_t *, door_info_t *, file_t *);
static int door_release_fds(door_desc_t *, uint_t);
static void door_fd_close(door_desc_t *, uint_t);
static void door_fp_close(struct file **, uint_t);
343 door_my_data(int create_if_missing
)
347 ddp
= curthread
->t_door
;
348 if (create_if_missing
&& ddp
== NULL
)
349 ddp
= curthread
->t_door
= kmem_zalloc(sizeof (*ddp
), KM_SLEEP
);
354 static door_server_t
*
355 door_my_server(int create_if_missing
)
357 door_data_t
*ddp
= door_my_data(create_if_missing
);
359 return ((ddp
!= NULL
)? DOOR_SERVER(ddp
) : NULL
);
362 static door_client_t
*
363 door_my_client(int create_if_missing
)
365 door_data_t
*ddp
= door_my_data(create_if_missing
);
367 return ((ddp
!= NULL
)? DOOR_CLIENT(ddp
) : NULL
);
371 * System call to create a door
374 door_create(void (*pc_cookie
)(), void *data_cookie
, uint_t attributes
)
379 if ((attributes
& ~DOOR_CREATE_MASK
) ||
380 ((attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) ==
381 (DOOR_UNREF
| DOOR_UNREF_MULTI
)))
382 return (set_errno(EINVAL
));
384 if ((err
= door_create_common(pc_cookie
, data_cookie
, attributes
, 0,
386 return (set_errno(err
));
388 f_setfd(fd
, FD_CLOEXEC
);
/*
 * Common code for creating user and kernel doors.  If a door was
 * created, stores a file structure pointer in the location pointed
 * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
 * pointer to a file descriptor is passed in as fdp, allocates a file
 * descriptor representing the door.  If a door could not be created,
 * an error is returned.
 *
 * NOTE(review): this block was damaged in extraction -- the function
 * signature's return type, several local declarations (dp, vp, fp),
 * vnode setup lines, error returns, and closing braces are missing.
 * Surviving code tokens are reproduced unchanged below.
 */
door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
    int from_kernel, int *fdp, file_t **fpp)
	static door_id_t index = 0;
	/* kernel doors are owned by p0; user doors by the calling proc */
	proc_t *p = (from_kernel)? &p0 : curproc;

	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);

	dp->door_vnode = vn_alloc(KM_SLEEP);
	dp->door_data = data_cookie;
	dp->door_pc = pc_cookie;
	dp->door_flags = attributes;
#ifdef _SYSCALL32_IMPL
	/* 32-bit clients can't address more than UINT32_MAX of data */
	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
		dp->door_data_max = UINT32_MAX;
		dp->door_data_max = SIZE_MAX;
	dp->door_data_min = 0UL;
	/* DOOR_REFUSE_DESC doors accept no descriptors at all */
	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;

	vn_setops(vp, &door_vnodeops);
	vp->v_vfsp = &door_vfs;
	vp->v_data = (caddr_t)dp;
	mutex_enter(&door_knob);
	dp->door_index = index++;
	/* add to per-process door list */
	door_list_insert(dp);
	mutex_exit(&door_knob);

	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
		/*
		 * If the file table is full, remove the door from the
		 * per-process list, free the door, and return NULL.
		 */
		mutex_enter(&door_knob);
		door_list_delete(dp);
		mutex_exit(&door_knob);
		kmem_free(dp, sizeof (door_node_t));
	mutex_exit(&fp->f_tlock);
460 door_check_limits(door_node_t
*dp
, door_arg_t
*da
, int upcall
)
462 ASSERT(MUTEX_HELD(&door_knob
));
464 /* we allow unref upcalls through, despite any minimum */
465 if (da
->data_size
< dp
->door_data_min
&&
466 !(upcall
&& da
->data_ptr
== DOOR_UNREF_DATA
))
469 if (da
->data_size
> dp
->door_data_max
)
472 if (da
->desc_num
> 0 && (dp
->door_flags
& DOOR_REFUSE_DESC
))
475 if (da
->desc_num
> dp
->door_desc_max
)
/*
 * door_call(): system-call path for a client invoking a door.
 * Copies in the caller's door_arg_t, looks up the door, hands the
 * arguments to either an in-kernel server function or a user server
 * thread (via shuttle_resume), waits for the reply, and copies results
 * (data and passed descriptors) back out to the caller.
 *
 * NOTE(review): this block was badly damaged in extraction -- the
 * return type, many local declarations (dp, lwp, datamodel, da32,
 * error, destarg, dsize, didpp, ncopied, fpp, cancel_pending), loop
 * headers, else-arms, gotos/labels, and most closing braces are
 * missing.  Surviving code tokens are reproduced unchanged; do not
 * assume this fragment compiles.
 */
door_call(int did, void *args)
	kthread_t *server_thread;
	door_client_t *ct;		/* curthread door_data */
	door_server_t *st;		/* server thread door_data */
	door_desc_t *start = NULL;
	/* destructor for data returned by a kernel server */
	void (*destfn)() = NULL;

	lwp = ttolwp(curthread);
	datamodel = lwp_getdatamodel(lwp);

	ct = door_my_client(1);

	/* Copy in the caller's door_arg_t (32-bit form is widened). */
	if (datamodel == DATAMODEL_NATIVE) {
		if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
			return (set_errno(EFAULT));
		if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
			return (set_errno(EFAULT));
		ct->d_args.data_ptr =
		    (char *)(uintptr_t)da32.data_ptr;
		ct->d_args.data_size = da32.data_size;
		ct->d_args.desc_ptr =
		    (door_desc_t *)(uintptr_t)da32.desc_ptr;
		ct->d_args.desc_num = da32.desc_num;
		    (char *)(uintptr_t)da32.rbuf;
		ct->d_args.rsize = da32.rsize;
		/* No arguments, and no results allowed */
		ct->d_args.data_size = 0;
		ct->d_args.desc_num = 0;
		ct->d_args.rsize = 0;

	if ((dp = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	/*
	 * We don't want to hold the door FD over the entire operation;
	 * instead, we put a hold on the door vnode and release the FD
	 */

	/*
	 * This should be done in shuttle_resume(), just before going to
	 * sleep, but we want to avoid overhead while holding door_knob.
	 * prstop() is just a no-op if we don't really go to sleep.
	 * We test not-kernel-address-space for the sake of clustering code.
	 */
	if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
		prstop(PR_REQUESTED, 0);

	mutex_enter(&door_knob);
	if (DOOR_INVALID(dp)) {
		mutex_exit(&door_knob);

	/*
	 * before we do anything, check that we are not overflowing the
	 * door's limits (data size, descriptor count)
	 */
	error = door_check_limits(dp, &ct->d_args, 0);
		mutex_exit(&door_knob);

	/*
	 * Check for in-kernel door server.
	 */
	if (dp->door_target == &p0) {
		caddr_t rbuf = ct->d_args.rbuf;
		size_t rsize = ct->d_args.rsize;

		ct->d_error = DOOR_WAIT;
		mutex_exit(&door_knob);
		/* translate file descriptors to vnodes */
		if (ct->d_args.desc_num) {
			error = door_translate_in();

		/*
		 * Call kernel door server.  Arguments are passed and
		 * returned as a door_arg pointer.  When called, data_ptr
		 * points to user data and desc_ptr points to a kernel list
		 * of door descriptors that have been converted to file
		 * structure pointers.  It's the server function's
		 * responsibility to copyin the data pointed to by data_ptr
		 * (this avoids extra copying in some cases).  On return,
		 * data_ptr points to a user buffer of data, and desc_ptr
		 * points to a kernel list of door descriptors representing
		 * files.  When a reference is passed to a kernel server,
		 * it is the server's responsibility to release the reference
		 * (by calling closef).  When the server includes a
		 * reference in its reply, it is released as part of the
		 * the call (the server must duplicate the reference if
		 * it wants to retain a copy).  The destfn, if set to
		 * non-NULL, is a destructor to be called when the returned
		 * kernel data (if any) is no longer needed (has all been
		 * translated and copied to user level).
		 */
		(*(dp->door_pc))(dp->door_data, &ct->d_args,
		    &destfn, &destarg, &error);
		mutex_enter(&door_knob);
		/* not implemented yet */
		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
			door_deliver_unref(dp);
		mutex_exit(&door_knob);

		/* translate vnodes to files */
		if (ct->d_args.desc_num) {
			error = door_translate_out();

		ct->d_buf = ct->d_args.rbuf;
		ct->d_bufsize = ct->d_args.rsize;
		if (rsize < (ct->d_args.data_size +
		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
			/* handle overflow */
			error = door_overflow(curthread, ct->d_args.data_ptr,
			    ct->d_args.data_size, ct->d_args.desc_ptr,
			    ct->d_args.desc_num);
			/* door_overflow sets d_args rbuf and rsize */
		ct->d_args.rbuf = rbuf;
		ct->d_args.rsize = rsize;

	/*
	 * Get a server thread from the target domain
	 */
	if ((server_thread = door_get_server(dp)) == NULL) {
		if (DOOR_INVALID(dp))
		mutex_exit(&door_knob);

	st = DOOR_SERVER(server_thread->t_door);
	if (ct->d_args.desc_num || ct->d_args.data_size) {
		int is_private = (dp->door_flags & DOOR_PRIVATE);
		/*
		 * Move data from client to server
		 */
		mutex_exit(&door_knob);
		error = door_args(server_thread, is_private);
		mutex_enter(&door_knob);
			/*
			 * We're not going to resume this thread after all
			 */
			door_release_server(dp, server_thread);
			shuttle_sleep(server_thread);
			mutex_exit(&door_knob);

	ct->d_error = DOOR_WAIT;
	st->d_caller = curthread;

	/* hand the CPU directly to the server thread */
	shuttle_resume(server_thread, &door_knob);

	mutex_enter(&door_knob);
	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
		/*
		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
		 */
		mutex_exit(&door_knob);		/* May block in ISSIG */
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(curproc, curthread) ||
		    (cancel_pending = schedctl_cancel_pending()) != 0) {
			/* Signal, forkall, ... */
			lwp->lwp_sysabort = 0;
				schedctl_cancel_eintr();
			mutex_enter(&door_knob);
			/*
			 * If the server has finished processing our call,
			 * or exited (calling door_slam()), then d_error
			 * will have changed.  If the server hasn't finished
			 * yet, d_error will still be DOOR_WAIT, and we
			 * let it know we are not interested in any
			 * results by sending a SIGCANCEL, unless the door
			 * is marked with DOOR_NO_CANCEL.
			 */
			if (ct->d_error == DOOR_WAIT &&
			    st->d_caller == curthread) {
				proc_t *p = ttoproc(server_thread);

				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
					mutex_exit(&door_knob);
					mutex_enter(&p->p_lock);
					sigtoproc(p, server_thread, SIGCANCEL);
					mutex_exit(&p->p_lock);
					mutex_enter(&door_knob);
			/*
			 * Return from stop(), server exit...
			 *
			 * Note that the server could have done a
			 * door_return while the client was in stop state
			 * (ISSIG), in which case the error condition
			 * is updated by the server.
			 */
			mutex_enter(&door_knob);
			if (ct->d_error == DOOR_WAIT) {
				/* Still waiting for a reply */
				shuttle_swtch(&door_knob);
				mutex_enter(&door_knob);
		} else if (ct->d_error == DOOR_EXIT) {
			/* Server did a door_return during ISSIG */

	/*
	 * Can't exit if the server is currently copying
	 * arguments out of our address space
	 */
	while (DOOR_T_HELD(ct))
		cv_wait(&ct->d_cv, &door_knob);

	/*
	 * If the server has not processed our message, free the
	 * transferred resources
	 */
	if (!ct->d_args_done) {

	/*
	 * Find out if results were successfully copied.
	 */
	if (ct->d_error == 0)

	ASSERT(ct->d_args_done);
	lwp->lwp_asleep = 0;		/* /proc */
	lwp->lwp_sysabort = 0;		/* /proc */
	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
		door_deliver_unref(dp);
	mutex_exit(&door_knob);

		door_fp_close(ct->d_fpp, ct->d_args.desc_num);

	/*
	 * Move the results to userland (if any)
	 */

	/*
	 * If server returned results successfully, then we've
	 * been interrupted and may need to clean up.
	 */
		ASSERT(error == EINTR);
		door_fp_close(ct->d_fpp, ct->d_args.desc_num);

	/*
	 * Copy back data if we haven't caused an overflow (already
	 * handled) and we are using a 2 copy transfer, or we are
	 * returning data from a kernel server.
	 */
	if (ct->d_args.data_size) {
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (ct->d_kernel || (!ct->d_overflow &&
		    ct->d_args.data_size <= door_max_arg)) {
			if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
			    ct->d_args.data_size)) {
				door_fp_close(ct->d_fpp, ct->d_args.desc_num);

	/*
	 * stuff returned doors into our proc, copyout the descriptors
	 */
	if (ct->d_args.desc_num) {
		uint_t n = ct->d_args.desc_num;

		dsize = n * sizeof (door_desc_t);
		start = didpp = kmem_alloc(dsize, KM_SLEEP);

			if (door_insert(*fpp, didpp) == -1) {
				/* Close remaining files */
				door_fp_close(fpp, n + 1);
			fpp++; didpp++; ncopied++;

		/* descriptors land in rbuf just past the (rounded) data */
		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));

		if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {

	/* Copy the (possibly updated) door_arg_t back to the caller. */
	if (datamodel == DATAMODEL_NATIVE) {
		if (copyout_nowatch(&ct->d_args, args,
		    sizeof (door_arg_t)) != 0)
		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
		da32.data_size = ct->d_args.data_size;
		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
		da32.desc_num = ct->d_args.desc_num;
		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
		da32.rsize = ct->d_args.rsize;
		if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {

	/* clean up the overflow buffer if an error occurred */
	if (error != 0 && ct->d_overflow) {
		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,

	/* call destructor */
		ASSERT(ct->d_kernel);
		(*destfn)(dp->door_data, destarg);

		ASSERT(!ct->d_kernel);
		kmem_free(ct->d_buf, ct->d_bufsize);

	/* clean up the descriptor copyout buffer */
		door_fd_close(start, ncopied);
		kmem_free(start, dsize);

		kmem_free(ct->d_fpp, ct->d_fpp_size);

	return (set_errno(error));
930 door_setparam_common(door_node_t
*dp
, int from_kernel
, int type
, size_t val
)
934 mutex_enter(&door_knob
);
936 if (DOOR_INVALID(dp
)) {
937 mutex_exit(&door_knob
);
942 * door_ki_setparam() can only affect kernel doors.
943 * door_setparam() can only affect doors attached to the current
946 if ((from_kernel
&& dp
->door_target
!= &p0
) ||
947 (!from_kernel
&& dp
->door_target
!= curproc
)) {
948 mutex_exit(&door_knob
);
953 case DOOR_PARAM_DESC_MAX
:
956 else if ((dp
->door_flags
& DOOR_REFUSE_DESC
) && val
!= 0)
959 dp
->door_desc_max
= (uint_t
)val
;
962 case DOOR_PARAM_DATA_MIN
:
963 if (val
> dp
->door_data_max
)
966 dp
->door_data_min
= val
;
969 case DOOR_PARAM_DATA_MAX
:
970 if (val
< dp
->door_data_min
)
973 dp
->door_data_max
= val
;
981 mutex_exit(&door_knob
);
986 door_getparam_common(door_node_t
*dp
, int type
, size_t *out
)
990 mutex_enter(&door_knob
);
992 case DOOR_PARAM_DESC_MAX
:
993 *out
= (size_t)dp
->door_desc_max
;
995 case DOOR_PARAM_DATA_MIN
:
996 *out
= dp
->door_data_min
;
998 case DOOR_PARAM_DATA_MAX
:
999 *out
= dp
->door_data_max
;
1005 mutex_exit(&door_knob
);
1010 door_setparam(int did
, int type
, size_t val
)
1015 if ((dp
= door_lookup(did
, NULL
)) == NULL
)
1016 return (set_errno(EBADF
));
1018 error
= door_setparam_common(dp
, 0, type
, val
);
1023 return (set_errno(error
));
1029 door_getparam(int did
, int type
, size_t *out
)
1035 if ((dp
= door_lookup(did
, NULL
)) == NULL
)
1036 return (set_errno(EBADF
));
1038 error
= door_getparam_common(dp
, type
, &val
);
1043 return (set_errno(error
));
1045 if (get_udatamodel() == DATAMODEL_NATIVE
) {
1046 if (copyout(&val
, out
, sizeof (val
)))
1047 return (set_errno(EFAULT
));
1048 #ifdef _SYSCALL32_IMPL
1050 size32_t val32
= (size32_t
)val
;
1053 return (set_errno(EOVERFLOW
));
1055 if (copyout(&val32
, out
, sizeof (val32
)))
1056 return (set_errno(EFAULT
));
1057 #endif /* _SYSCALL32_IMPL */
1064 * A copyout() which proceeds from high addresses to low addresses. This way,
1065 * stack guard pages are effective.
1067 * Note that we use copyout_nowatch(); this is called while the client is
1071 door_stack_copyout(const void *kaddr
, void *uaddr
, size_t count
)
1073 const char *kbase
= (const char *)kaddr
;
1074 uintptr_t ubase
= (uintptr_t)uaddr
;
1075 size_t pgsize
= PAGESIZE
;
1077 if (count
<= pgsize
)
1078 return (copyout_nowatch(kaddr
, uaddr
, count
));
1081 uintptr_t start
, end
, offset
, amount
;
1083 end
= ubase
+ count
;
1084 start
= P2ALIGN(end
- 1, pgsize
);
1085 if (P2ALIGN(ubase
, pgsize
) == start
)
1088 offset
= start
- ubase
;
1089 amount
= end
- start
;
1091 ASSERT(amount
> 0 && amount
<= count
&& amount
<= pgsize
);
1093 if (copyout_nowatch(kbase
+ offset
, (void *)start
, amount
))
1101 * Writes the stack layout for door_return() into the door_server_t of the
1105 door_layout(kthread_t
*tp
, size_t data_size
, uint_t ndesc
, int info_needed
)
1107 door_server_t
*st
= DOOR_SERVER(tp
->t_door
);
1108 door_layout_t
*out
= &st
->d_layout
;
1109 uintptr_t base_sp
= (uintptr_t)st
->d_sp
;
1110 size_t ssize
= st
->d_ssize
;
1112 uintptr_t descp
, datap
, infop
, resultsp
, finalsp
;
1113 size_t align
= STACK_ALIGN
;
1114 size_t results_sz
= sizeof (struct door_results
);
1115 model_t datamodel
= lwp_getdatamodel(ttolwp(tp
));
1117 ASSERT(!st
->d_layout_done
);
1119 #ifndef _STACK_GROWS_DOWNWARD
1120 #error stack does not grow downward, door_layout() must change
1123 #ifdef _SYSCALL32_IMPL
1124 if (datamodel
!= DATAMODEL_NATIVE
) {
1125 align
= STACK_ALIGN32
;
1126 results_sz
= sizeof (struct door_results32
);
1130 descsz
= ndesc
* sizeof (door_desc_t
);
1133 * To speed up the overflow checking, we do an initial check
1134 * that the passed in data size won't cause us to wrap past
1135 * base_sp. Since door_max_desc limits descsz, we can
1136 * safely use it here. 65535 is an arbitrary 'bigger than
1137 * we need, small enough to not cause trouble' constant;
1138 * the only constraint is that it must be > than:
1141 * sizeof (door_info_t) +
1142 * sizeof (door_results_t) +
1143 * (max adjustment from door_final_sp())
1145 * After we compute the layout, we can safely do a "did we wrap
1146 * around" check, followed by a check against the recorded
1149 if (data_size
>= SIZE_MAX
- (size_t)65535UL - descsz
)
1150 return (E2BIG
); /* overflow */
1152 descp
= P2ALIGN(base_sp
- descsz
, align
);
1153 datap
= P2ALIGN(descp
- data_size
, align
);
1156 infop
= P2ALIGN(datap
- sizeof (door_info_t
), align
);
1160 resultsp
= P2ALIGN(infop
- results_sz
, align
);
1161 finalsp
= door_final_sp(resultsp
, align
, datamodel
);
1163 if (finalsp
> base_sp
)
1164 return (E2BIG
); /* overflow */
1166 if (ssize
!= 0 && (base_sp
- finalsp
) > ssize
)
1167 return (E2BIG
); /* doesn't fit in stack */
1169 out
->dl_descp
= (ndesc
!= 0)? (caddr_t
)descp
: 0;
1170 out
->dl_datap
= (data_size
!= 0)? (caddr_t
)datap
: 0;
1171 out
->dl_infop
= info_needed
? (caddr_t
)infop
: 0;
1172 out
->dl_resultsp
= (caddr_t
)resultsp
;
1173 out
->dl_sp
= (caddr_t
)finalsp
;
1175 st
->d_layout_done
= 1;
/*
 * door_server_dispatch(): build the server thread's user stack for an
 * incoming invocation -- copy out passed descriptors, argument data,
 * an optional door_info_t (private pools), and the door_results
 * structure -- then transfer control to the server via
 * door_finish_dispatch().
 *
 * NOTE(review): this block was damaged in extraction -- the return
 * type, several local declarations (error, ncopied, datap, ndesc,
 * descsize, di, loop headers), error-path gotos/labels, and most
 * closing braces are missing.  Surviving code tokens are reproduced
 * unchanged; do not assume this fragment compiles.
 */
door_server_dispatch(door_client_t *ct, door_node_t *dp)
	door_server_t *st = DOOR_SERVER(curthread->t_door);
	door_layout_t *layout = &st->d_layout;
	int is_private = (dp->door_flags & DOOR_PRIVATE);
	/* private doors draw servers from the door; others from the proc */
	door_pool_t *pool = (is_private)? &dp->door_servers :
	    &curproc->p_server_threads;
	int empty_pool = (pool->dp_threads == NULL);
	caddr_t infop = NULL;
	size_t datasize = 0;
	file_t **fpp = ct->d_fpp;
	door_desc_t *start = NULL;

	datap = ct->d_args.data_ptr;
	datasize = ct->d_args.data_size;
	ndesc = ct->d_args.desc_num;
	descsize = ndesc * sizeof (door_desc_t);

	/*
	 * Reset datap to NULL if we aren't passing any data.  Be careful
	 * to let unref notifications through, though.
	 */
	if (datap == DOOR_UNREF_DATA) {
		if (ct->d_upcall != NULL)
	} else if (datasize == 0) {

	/*
	 * Get the stack layout, if it hasn't already been done.
	 */
	if (!st->d_layout_done) {
		error = door_layout(curthread, datasize, ndesc,
		    (is_private && empty_pool));

	/*
	 * fill out the stack, starting from the top.  Layout was already
	 * filled in by door_args() or door_translate_out().
	 */
	if (layout->dl_descp != NULL) {
		start = kmem_alloc(descsize, KM_SLEEP);
			if (door_insert(*fpp, &start[ncopied]) == -1) {
		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
	fpp = NULL;			/* finished processing */

	if (layout->dl_datap != NULL) {
		ASSERT(datasize != 0);
		datap = layout->dl_datap;
		if (ct->d_upcall != NULL || datasize <= door_max_arg) {
			if (door_stack_copyout(ct->d_buf, datap, datasize)) {

	/* private pool with no spare threads: tell the server about itself */
	if (is_private && empty_pool) {
		infop = layout->dl_infop;
		ASSERT(infop != NULL);
		di.di_target = curproc->p_pid;
		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
		di.di_uniquifier = dp->door_index;
		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
		if (door_stack_copyout(&di, infop, sizeof (di))) {

	/* Write the door_results structure in the caller's data model. */
	if (get_udatamodel() == DATAMODEL_NATIVE) {
		struct door_results dr;

		dr.cookie = dp->door_data;
		dr.data_ptr = datap;
		dr.data_size = datasize;
		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
		dr.desc_num = ncopied;
		dr.pc = dp->door_pc;
		dr.nservers = !empty_pool;
		dr.door_info = (door_info_t *)infop;

		if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
#ifdef _SYSCALL32_IMPL
		struct door_results32 dr32;

		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
		dr32.data_size = (size32_t)datasize;
		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
		dr32.desc_num = ncopied;
		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
		dr32.nservers = !empty_pool;
		dr32.door_info = (caddr32_t)(uintptr_t)infop;

		if (door_stack_copyout(&dr32, layout->dl_resultsp,

	error = door_finish_dispatch(layout->dl_sp);

	/* error cleanup: release any descriptors we had copied */
	if (start != NULL) {
		door_fd_close(start, ncopied);
		kmem_free(start, descsize);
		door_fp_close(fpp, ndesc);
1339 * Return the results (if any) to the caller (if any) and wait for the
1340 * next invocation on a door.
1343 door_return(caddr_t data_ptr
, size_t data_size
,
1344 door_desc_t
*desc_ptr
, uint_t desc_num
, caddr_t sp
, size_t ssize
)
1350 door_server_t
*st
; /* curthread door_data */
1351 door_client_t
*ct
; /* caller door_data */
1354 st
= door_my_server(1);
1357 * If thread was bound to a door that no longer exists, return
1358 * an error. This can happen if a thread is bound to a door
1359 * before the process calls forkall(); in the child, the door
1360 * doesn't exist and door_fork() sets the d_invbound flag.
1363 return (set_errno(EINVAL
));
1365 st
->d_sp
= sp
; /* Save base of stack. */
1366 st
->d_ssize
= ssize
; /* and its size */
1369 * This should be done in shuttle_resume(), just before going to
1370 * sleep, but we want to avoid overhead while holding door_knob.
1371 * prstop() is just a no-op if we don't really go to sleep.
1372 * We test not-kernel-address-space for the sake of clustering code.
1374 lwp
= ttolwp(curthread
);
1375 if (lwp
&& lwp
->lwp_nostop
== 0 && curproc
->p_as
!= &kas
)
1376 prstop(PR_REQUESTED
, 0);
1378 /* Make sure the caller hasn't gone away */
1379 mutex_enter(&door_knob
);
1380 if ((caller
= st
->d_caller
) == NULL
|| caller
->t_door
== NULL
) {
1381 if (desc_num
!= 0) {
1382 /* close any DOOR_RELEASE descriptors */
1383 mutex_exit(&door_knob
);
1384 error
= door_release_fds(desc_ptr
, desc_num
);
1386 return (set_errno(error
));
1387 mutex_enter(&door_knob
);
1391 ct
= DOOR_CLIENT(caller
->t_door
);
1393 ct
->d_args
.data_size
= data_size
;
1394 ct
->d_args
.desc_num
= desc_num
;
1396 * Transfer results, if any, to the client
1398 if (data_size
!= 0 || desc_num
!= 0) {
1400 * Prevent the client from exiting until we have finished
1404 mutex_exit(&door_knob
);
1405 error
= door_results(caller
, data_ptr
, data_size
,
1406 desc_ptr
, desc_num
);
1407 mutex_enter(&door_knob
);
1410 * Pass EOVERFLOW errors back to the client
1412 if (error
&& error
!= EOVERFLOW
) {
1413 mutex_exit(&door_knob
);
1414 return (set_errno(error
));
1418 /* Put ourselves on the available server thread list */
1419 door_release_server(st
->d_pool
, curthread
);
1422 * Make sure the caller is still waiting to be resumed
1427 thread_lock(caller
);
1428 ct
->d_error
= error
; /* Return any errors */
1429 if (caller
->t_state
== TS_SLEEP
&&
1430 SOBJ_TYPE(caller
->t_sobj_ops
) == SOBJ_SHUTTLE
) {
1433 tlp
= caller
->t_lockp
;
1435 * Setting t_disp_queue prevents erroneous preemptions
1436 * if this thread is still in execution on another
1439 caller
->t_disp_queue
= cp
->cpu_disp
;
1442 * We are calling thread_onproc() instead of
1443 * THREAD_ONPROC() because compiler can reorder
1444 * the two stores of t_state and t_lockp in
1447 thread_onproc(caller
, cp
);
1448 disp_lock_exit_high(tlp
);
1449 shuttle_resume(caller
, &door_knob
);
1451 /* May have been setrun or in stop state */
1452 thread_unlock(caller
);
1453 shuttle_swtch(&door_knob
);
1456 shuttle_swtch(&door_knob
);
1460 * We've sprung to life. Determine if we are part of a door
1461 * invocation, or just interrupted
1463 mutex_enter(&door_knob
);
1464 if ((dp
= st
->d_active
) != NULL
) {
1466 * Normal door invocation. Return any error condition
1467 * encountered while trying to pass args to the server
1470 lwp
->lwp_asleep
= 0;
1472 * Prevent the caller from leaving us while we
1473 * are copying out the arguments from it's buffer.
1475 ASSERT(st
->d_caller
!= NULL
);
1476 ct
= DOOR_CLIENT(st
->d_caller
->t_door
);
1479 mutex_exit(&door_knob
);
1480 error
= door_server_dispatch(ct
, dp
);
1481 mutex_enter(&door_knob
);
1484 /* let the client know we have processed its message */
1485 ct
->d_args_done
= 1;
1488 caller
= st
->d_caller
;
1490 ct
= DOOR_CLIENT(caller
->t_door
);
1495 mutex_exit(&door_knob
);
1499 * We are not involved in a door_invocation.
1500 * Check for /proc related activity...
1502 st
->d_caller
= NULL
;
1503 door_server_exit(curproc
, curthread
);
1504 mutex_exit(&door_knob
);
1506 if (ISSIG(curthread
, FORREAL
) || lwp
->lwp_sysabort
||
1507 MUSTRETURN(curproc
, curthread
) ||
1508 (cancel_pending
= schedctl_cancel_pending()) != 0) {
1510 schedctl_cancel_eintr();
1511 lwp
->lwp_asleep
= 0;
1512 lwp
->lwp_sysabort
= 0;
1513 return (set_errno(EINTR
));
1515 /* Go back and wait for another request */
1516 lwp
->lwp_asleep
= 0;
1517 mutex_enter(&door_knob
);
/*
 * Revoke any future invocations on this door.
 *
 * Marks the door DOOR_REVOKED (only the door's creating process may do
 * this), wakes any server threads waiting in the relevant pool, and then
 * invalidates the caller's descriptor.  Returns 0 on success or sets
 * errno (EBADF, EPERM) and returns -1 via set_errno().
 */
int
door_revoke(int did)
{
	door_node_t	*d;
	int		error;

	if ((d = door_lookup(did, NULL)) == NULL)
		return (set_errno(EBADF));

	mutex_enter(&door_knob);
	/* Only the process serving the door may revoke it. */
	if (d->door_target != curproc) {
		mutex_exit(&door_knob);
		/* NOTE(review): reconstructed — original dropped the releasef(did) here; confirm */
		releasef(did);
		return (set_errno(EPERM));
	}
	d->door_flags |= DOOR_REVOKED;
	/* Wake waiters in the private pool or the per-process pool. */
	if (d->door_flags & DOOR_PRIVATE)
		cv_broadcast(&d->door_servers.dp_cv);
	else
		cv_broadcast(&curproc->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
	releasef(did);
	/* Invalidate the descriptor */
	if ((error = closeandsetf(did, NULL)) != 0)
		return (set_errno(error));
	return (0);
}
/*
 * door_info system call: return a door_info_t describing either the door
 * referenced by descriptor 'did', or (did == DOOR_QUERY) the door this
 * thread is bound to.  Copies the result out to the user buffer d_info.
 */
int
door_info(int did, struct door_info *d_info)
{
	door_node_t	*dp;
	door_info_t	di;
	door_server_t	*st;
	file_t		*fp = NULL;

	if (did == DOOR_QUERY) {
		/* Get information on door current thread is bound to */
		if ((st = door_my_server(0)) == NULL ||
		    (dp = st->d_pool) == NULL)
			/* Thread isn't bound to a door */
			return (set_errno(EBADF));
	} else if ((dp = door_lookup(did, &fp)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	door_info_common(dp, &di, fp);

	/* door_lookup() took a file hold; drop it (DOOR_QUERY took none). */
	if (did != DOOR_QUERY)
		releasef(did);

	if (copyout(&di, d_info, sizeof (struct door_info)))
		return (set_errno(EFAULT));
	return (0);
}
/*
 * Common code for getting information about a door either via the
 * door_info system call or the door_ki_info kernel call.
 *
 * 'fp' is NULL when the caller is querying the door it is bound to;
 * otherwise it is the file struct from which the door was looked up and
 * is used for the unreferenced-detection heuristic below.
 */
void
door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
{
	int unref_count;

	bzero(dip, sizeof (door_info_t));

	mutex_enter(&door_knob);
	if (dp->door_target == NULL)
		dip->di_target = -1;		/* door has been revoked */
	else
		dip->di_target = dp->door_target->p_pid;

	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
	if (dp->door_target == curproc)
		dip->di_attributes |= DOOR_LOCAL;
	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
	dip->di_uniquifier = dp->door_index;
	/*
	 * If this door is in the middle of having an unreferenced
	 * notification delivered, don't count the VN_HOLD by
	 * door_deliver_unref in determining if it is unreferenced.
	 * This handles the case where door_info is called from the
	 * thread delivering the unref notification.
	 */
	if (dp->door_flags & DOOR_UNREF_ACTIVE)
		unref_count = 2;
	else
		unref_count = 1;
	mutex_exit(&door_knob);

	if (fp == NULL) {
		/*
		 * If this thread is bound to the door, then we can just
		 * check the vnode; a ref count of 1 (or 2 if this is
		 * handling an unref notification) means that the hold
		 * from the door_bind is the only reference to the door
		 * (no file descriptor refers to it).
		 */
		if (DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
	} else {
		/*
		 * If we're working from a file descriptor or door handle
		 * we need to look at the file structure count. We don't
		 * need to hold the vnode lock since this is just a snapshot.
		 */
		mutex_enter(&fp->f_tlock);
		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
			dip->di_attributes |= DOOR_IS_UNREF;
		mutex_exit(&fp->f_tlock);
	}
}
/*
 * Return credentials of the door caller (if any) for this invocation.
 *
 * If the invocation came through door_upcall with explicit credentials,
 * those are reported (with pid p0); otherwise the calling thread's own
 * credentials and pid are used.  The ucred is copied out to 'uch'.
 */
int
door_ucred(struct ucred_s *uch)
{
	kthread_t	*caller;
	door_server_t	*st;
	door_client_t	*ct;
	door_upcall_t	*dup;
	struct proc	*p;
	struct ucred_s	*res;
	int		err;

	mutex_enter(&door_knob);
	if ((st = door_my_server(0)) == NULL ||
	    (caller = st->d_caller) == NULL) {
		mutex_exit(&door_knob);
		return (set_errno(EINVAL));
	}

	ASSERT(caller->t_door != NULL);
	ct = DOOR_CLIENT(caller->t_door);

	/* Prevent caller from exiting while we examine the cred */
	/* NOTE(review): reconstructed — a DOOR_T_HOLD(ct) was dropped here; confirm */
	DOOR_T_HOLD(ct);
	mutex_exit(&door_knob);

	p = ttoproc(caller);

	/*
	 * If the credentials are not specified by the client, get the one
	 * associated with the calling process.
	 */
	if ((dup = ct->d_upcall) != NULL)
		res = cred2ucred(dup->du_cred, p0.p_pid, NULL, CRED());
	else
		res = cred2ucred(caller->t_cred, p->p_pid, NULL, CRED());

	mutex_enter(&door_knob);
	DOOR_T_RELEASE(ct);
	mutex_exit(&door_knob);

	err = copyout(res, uch, res->uc_size);

	kmem_free(res, res->uc_size);

	if (err != 0)
		return (set_errno(EFAULT));
	return (0);
}
/*
 * Bind the current lwp to the server thread pool associated with 'did'.
 * Only private doors served by the current process may be bound to.
 */
int
door_bind(int did)
{
	door_node_t	*dp;
	door_server_t	*st;

	if ((dp = door_lookup(did, NULL)) == NULL) {
		/* Not a door */
		return (set_errno(EBADF));
	}

	/*
	 * Can't bind to a non-private door, and can't bind to a door
	 * served by another process.
	 */
	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
	    dp->door_target != curproc) {
		releasef(did);
		return (set_errno(EINVAL));
	}

	st = door_my_server(1);
	/* Drop any previous binding before taking the new one. */
	if (st->d_pool)
		door_unbind_thread(st->d_pool);
	st->d_pool = dp;
	st->d_invbound = 0;
	door_bind_thread(dp);
	releasef(did);

	return (0);
}
/*
 * Unbind the current lwp from its server thread pool.
 *
 * A thread flagged d_invbound (bound in the parent before forkall, so
 * the binding is invalid in the child) just has the flag cleared.
 */
int
door_unbind(void)
{
	door_server_t *st;

	if ((st = door_my_server(0)) == NULL)
		return (set_errno(EBADF));

	if (st->d_invbound) {
		ASSERT(st->d_pool == NULL);
		st->d_invbound = 0;
		return (0);
	}
	if (st->d_pool == NULL)
		return (set_errno(EBADF));
	door_unbind_thread(st->d_pool);
	st->d_pool = NULL;
	return (0);
}
/*
 * Create a descriptor for the associated file and fill in the
 * attributes associated with it.
 *
 * Return 0 for success, -1 otherwise;
 */
static int
door_insert(struct file *fp, door_desc_t *dp)
{
	struct vnode	*vp;
	int		fd;
	door_attr_t	attributes = DOOR_DESCRIPTOR;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fd = ufalloc(0)) == -1)
		return (-1);
	/* NOTE(review): reconstructed — setf(fd, fp) was dropped here; confirm */
	setf(fd, fp);
	dp->d_data.d_desc.d_descriptor = fd;

	/* Fill in the attributes */
	if (fop_realvp(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;	/* not namefs-mounted; use it directly */
	if (vp && vp->v_type == VDOOR) {
		if (VTOD(vp)->door_target == curproc)
			attributes |= DOOR_LOCAL;
		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
	}
	dp->d_attributes = attributes;
	return (0);
}
/*
 * Return an available thread for this server.  A NULL return value indicates
 * that either:
 *	The door has been revoked, or
 *	a signal was received.
 * The two conditions can be differentiated using DOOR_INVALID(dp).
 *
 * On success the returned thread has been removed from the pool and
 * marked ONPROC; the caller is committed to resuming it.
 */
static kthread_t *
door_get_server(door_node_t *dp)
{
	kthread_t	**ktp;
	kthread_t	*server_t;
	door_pool_t	*pool;
	door_server_t	*st;
	int		signalled;
	disp_lock_t	*tlp;
	cpu_t		*cp;

	ASSERT(MUTEX_HELD(&door_knob));

	/* Private doors have their own pool; otherwise use the process's. */
	if (dp->door_flags & DOOR_PRIVATE)
		pool = &dp->door_servers;
	else
		pool = &dp->door_target->p_server_threads;

	for (;;) {
		/*
		 * We search the thread pool, looking for a server thread
		 * ready to take an invocation (i.e. one which is still
		 * sleeping on a shuttle object). If none are available,
		 * we sleep on the pool's CV, and will be signaled when a
		 * thread is added to the pool.
		 *
		 * This relies on the fact that once a thread in the thread
		 * pool wakes up, it *must* remove and add itself to the pool
		 * before it can receive door calls.
		 */
		if (DOOR_INVALID(dp))
			return (NULL);	/* Target has become invalid */

		for (ktp = &pool->dp_threads;
		    (server_t = *ktp) != NULL;
		    ktp = &st->d_servers) {
			st = DOOR_SERVER(server_t->t_door);

			thread_lock(server_t);
			if (server_t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
				break;
			thread_unlock(server_t);
		}
		if (server_t != NULL)
			break;		/* we've got a live one! */

		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
		    &signalled)) {
			/*
			 * If we were signaled and the door is still
			 * valid, pass the signal on to another waiter.
			 */
			if (signalled && !DOOR_INVALID(dp))
				cv_signal(&pool->dp_cv);
			return (NULL);	/* Got a signal */
		}
	}

	/*
	 * We've got a thread_lock()ed thread which is still on the
	 * shuttle. Take it off the list of available server threads
	 * and mark it as ONPROC. We are committed to resuming this
	 * thread now.
	 */
	tlp = server_t->t_lockp;
	cp = CPU;

	*ktp = st->d_servers;		/* unlink from the pool list */
	st->d_servers = NULL;
	/*
	 * Setting t_disp_queue prevents erroneous preemptions
	 * if this thread is still in execution on another processor
	 */
	server_t->t_disp_queue = cp->cpu_disp;
	CL_ACTIVE(server_t);
	/*
	 * We are calling thread_onproc() instead of
	 * THREAD_ONPROC() because compiler can reorder
	 * the two stores of t_state and t_lockp in
	 * THREAD_ONPROC().
	 */
	thread_onproc(server_t, cp);
	disp_lock_exit(tlp);
	return (server_t);
}
/*
 * Put a server thread back in the pool.
 *
 * 'dp' may be NULL (or non-private), in which case the thread goes back
 * on its process's shared pool; a private door gets its own pool.
 */
static void
door_release_server(door_node_t *dp, kthread_t *t)
{
	door_server_t	*st = DOOR_SERVER(t->t_door);
	door_pool_t	*pool;

	ASSERT(MUTEX_HELD(&door_knob));
	st->d_active = NULL;
	st->d_caller = NULL;
	st->d_layout_done = 0;
	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
		/* a private pool's threads always belong to the target */
		ASSERT(dp->door_target == NULL ||
		    dp->door_target == ttoproc(t));
		pool = &dp->door_servers;
	} else {
		pool = &ttoproc(t)->p_server_threads;
	}

	/* Push onto the head of the pool's singly-linked thread list. */
	st->d_servers = pool->dp_threads;
	pool->dp_threads = t;

	/* If someone is waiting for a server thread, wake him up */
	cv_signal(&pool->dp_cv);
}
/*
 * Remove a server thread from the pool if present.
 */
static void
door_server_exit(proc_t *p, kthread_t *t)
{
	door_pool_t	*pool;
	kthread_t	**next;
	door_server_t	*st = DOOR_SERVER(t->t_door);

	ASSERT(MUTEX_HELD(&door_knob));
	if (st->d_pool != NULL) {
		/* bound thread: remove from its private door's pool */
		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
		pool = &st->d_pool->door_servers;
	} else {
		pool = &p->p_server_threads;
	}

	/* Walk the list by link-pointer so we can unlink in place. */
	next = &pool->dp_threads;
	while (*next != NULL) {
		/* NOTE(review): reconstructed — the (*next == t) test was dropped; confirm */
		if (*next == t) {
			*next = DOOR_SERVER(t->t_door)->d_servers;
			return;
		}
		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
	}
}
/*
 * Lookup the door descriptor. Caller must call releasef when finished
 * with associated door.
 *
 * Returns the door node (and, if fpp != NULL, the file struct) or NULL
 * if 'did' is not a valid door descriptor.
 */
static door_node_t *
door_lookup(int did, file_t **fpp)
{
	vnode_t	*vp;
	file_t	*fp;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if ((fp = getf(did)) == NULL)
		return (NULL);
	/*
	 * Use the underlying vnode (we may be namefs mounted)
	 */
	if (fop_realvp(fp->f_vnode, &vp, NULL))
		vp = fp->f_vnode;

	if (vp == NULL || vp->v_type != VDOOR) {
		/* NOTE(review): reconstructed tail — drop the hold and fail; confirm */
		releasef(did);
		return (NULL);
	}

	if (fpp)
		*fpp = fp;

	return (VTOD(vp));
}
/*
 * The current thread is exiting, so clean up any pending
 * invocation details.
 */
void
door_exit(void)
{
	door_node_t	*dp;
	door_server_t	*st;
	door_client_t	*ct;
	door_data_t	*dt;

	/*
	 * If we are an active door server, notify our
	 * client that we are exiting and revoke our door.
	 */
	if ((dt = door_my_data(0)) == NULL)
		return;			/* no door state on this thread */
	ct = DOOR_CLIENT(dt);
	st = DOOR_SERVER(dt);

	mutex_enter(&door_knob);
	/* Wait until nobody else holds our client/server door state. */
	for (;;) {
		if (DOOR_T_HELD(ct))
			cv_wait(&ct->d_cv, &door_knob);
		else if (DOOR_T_HELD(st))
			cv_wait(&st->d_cv, &door_knob);
		else
			break;		/* neither flag is set */
	}
	curthread->t_door = NULL;
	if ((dp = st->d_active) != NULL) {
		kthread_t	*t = st->d_caller;
		proc_t		*p = curproc;

		/* Revoke our door if the process is exiting */
		if (dp->door_target == p && (p->p_flag & SEXITING)) {
			door_list_delete(dp);
			dp->door_target = NULL;
			dp->door_flags |= DOOR_REVOKED;
			if (dp->door_flags & DOOR_PRIVATE)
				cv_broadcast(&dp->door_servers.dp_cv);
			else
				cv_broadcast(&p->p_server_threads.dp_cv);
		}

		if (t != NULL) {
			/*
			 * Let the caller know we are gone
			 */
			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
			/* NOTE(review): reconstructed — thread_lock/setrun_locked
			 * around this wakeup were dropped by extraction; confirm */
			thread_lock(t);
			if (t->t_state == TS_SLEEP &&
			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
				setrun_locked(t);
			thread_unlock(t);
		}
	}
	mutex_exit(&door_knob);
	if (st->d_pool)
		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
	kmem_free(dt, sizeof (door_data_t));
}
/*
 * Set DOOR_REVOKED for all doors of the current process. This is called
 * on exit before all lwp's are being terminated so that door calls will
 * return with an error.
 */
void
door_revoke_all(void)
{
	door_node_t	*dp;
	proc_t		*p = ttoproc(curthread);

	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		ASSERT(dp->door_target == p);
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	/* wake the shared pool once, after marking everything revoked */
	cv_broadcast(&p->p_server_threads.dp_cv);
	mutex_exit(&door_knob);
}
/*
 * The process is exiting, and all doors it created need to be revoked.
 */
void
door_slam(void)
{
	door_node_t	*dp;
	proc_t		*p = ttoproc(curthread);

	/* Only run once the process is down to its last lwp. */
	ASSERT(p->p_lwpcnt == 1);
	/*
	 * Walk the list of active doors created by this process and
	 * revoke them all.
	 */
	mutex_enter(&door_knob);
	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
		dp->door_target = NULL;
		dp->door_flags |= DOOR_REVOKED;
		if (dp->door_flags & DOOR_PRIVATE)
			cv_broadcast(&dp->door_servers.dp_cv);
	}
	cv_broadcast(&p->p_server_threads.dp_cv);
	/* Clear the list */
	p->p_door_list = NULL;

	/* Clean up the unref list */
	while ((dp = p->p_unref_list) != NULL) {
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		mutex_exit(&door_knob);
		/* NOTE(review): reconstructed — a VN_RELE(DTOV(dp)) was dropped here
		 * (drops the hold taken by door_deliver_unref); confirm */
		VN_RELE(DTOV(dp));
		mutex_enter(&door_knob);
	}
	mutex_exit(&door_knob);
}
/*
 * The process is executing forkall(), and we need to flag threads that
 * are bound to a door in the child. This will make the child threads
 * return an error to door_return unless they call door_unbind first.
 */
void
door_fork(kthread_t *parent, kthread_t *child)
{
	door_data_t	*pt = parent->t_door;
	door_server_t	*st = DOOR_SERVER(pt);
	door_data_t	*dt;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
		/* parent thread is bound to a door */
		dt = child->t_door =
		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
		/* mark the child's binding invalid (see door_return) */
		DOOR_SERVER(dt)->d_invbound = 1;
	}
}
/*
 * Deliver queued unrefs to appropriate door server.
 *
 * Runs as the process's single unref thread: loops forever pulling doors
 * off p_unref_list and upcalling each with DOOR_UNREF_DATA.  Returns
 * only on error (EALREADY if a second unref thread is started, EINTR if
 * interrupted so forkall()/exit() can complete).
 */
int
door_unref(void)
{
	door_node_t	*dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);

	/* make sure there's only one unref thread per process */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return (set_errno(EALREADY));
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);		/* create info, if necessary */

	for (;;) {
		mutex_enter(&door_knob);

		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
				/*
				 * Interrupted.
				 * Return so we can finish forkall() or exit().
				 */
				p->p_unref_thread = 0;
				mutex_exit(&door_knob);
				return (set_errno(EINTR));
			}
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		(void) door_upcall(DTOV(dp), &unref_args, NULL, SIZE_MAX, 0);

		/* Discard any reply buffer the upcall allocated. */
		if (unref_args.rbuf != 0) {
			kmem_free(unref_args.rbuf, unref_args.rsize);
			unref_args.rbuf = NULL;
			unref_args.rsize = 0;
		}

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		/* NOTE(review): reconstructed — VN_RELE(DTOV(dp)) dropped here; confirm */
		VN_RELE(DTOV(dp));
	}
}
/*
 * Deliver queued unrefs to kernel door server.
 *
 * Kernel analogue of door_unref(): loops forever invoking each queued
 * door's server procedure directly (no upcall), CPR-safe while waiting.
 */
/* ARGSUSED */
static void
door_unref_kernel(caddr_t arg)
{
	door_node_t	*dp;
	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
	proc_t *p = ttoproc(curthread);
	callb_cpr_t cprinfo;

	/* should only be one of these */
	mutex_enter(&door_knob);
	if (p->p_unref_thread) {
		mutex_exit(&door_knob);
		return;
	}
	p->p_unref_thread = 1;
	mutex_exit(&door_knob);

	(void) door_my_data(1);		/* make sure we have a door_data_t */

	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");

	for (;;) {
		mutex_enter(&door_knob);
		/* Grab a queued request */
		while ((dp = p->p_unref_list) == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&p->p_unref_cv, &door_knob);
			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
		}
		p->p_unref_list = dp->door_ulist;
		dp->door_ulist = NULL;
		dp->door_flags |= DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);

		/* invoke the kernel server procedure directly */
		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);

		mutex_enter(&door_knob);
		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
		mutex_exit(&door_knob);
		/* NOTE(review): reconstructed — VN_RELE(DTOV(dp)) dropped here; confirm */
		VN_RELE(DTOV(dp));
	}
}
/*
 * Queue an unref invocation for processing for the current process
 * The door may or may not be revoked at this point.
 */
void
door_deliver_unref(door_node_t *d)
{
	struct proc *server = d->door_target;

	ASSERT(MUTEX_HELD(&door_knob));
	ASSERT(d->door_active == 0);

	/* NOTE(review): reconstructed — early return when the door has no
	 * target (already revoked) was dropped by extraction; confirm */
	if (server == NULL)
		return;
	/*
	 * Create a lwp to deliver unref calls if one isn't already running.
	 *
	 * A separate thread is used to deliver unrefs since the current
	 * thread may be holding resources (e.g. locks) in user land that
	 * may be needed by the unref processing. This would cause a
	 * deadlock.
	 */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		/* multiple unrefs */
		d->door_flags &= ~DOOR_DELAY;
	} else {
		/* Only 1 unref per door */
		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
	}
	mutex_exit(&door_knob);

	/*
	 * Need to bump the vnode count before putting the door on the
	 * list so it doesn't get prematurely released by door_unref.
	 */
	VN_HOLD(DTOV(d));

	mutex_enter(&door_knob);
	/* is this door already on the unref list? */
	if (d->door_flags & DOOR_UNREF_MULTI) {
		door_node_t *dp;
		for (dp = server->p_unref_list; dp != NULL;
		    dp = dp->door_ulist) {
			if (d == dp) {
				/* already there, don't need to add another */
				mutex_exit(&door_knob);
				VN_RELE(DTOV(d));
				mutex_enter(&door_knob);
				return;
			}
		}
	}
	ASSERT(d->door_ulist == NULL);
	d->door_ulist = server->p_unref_list;
	server->p_unref_list = d;
	cv_broadcast(&server->p_unref_cv);
}
/*
 * The callers buffer isn't big enough for all of the data/fd's. Allocate
 * space in the callers address space for the results and copy the data
 * there.
 *
 * For EOVERFLOW, we must clean up the server's door descriptors.
 */
static int
door_overflow(
	kthread_t	*caller,
	caddr_t		data_ptr,	/* data location */
	size_t		data_size,	/* data size */
	door_desc_t	*desc_ptr,	/* descriptor location */
	uint_t		desc_num)	/* descriptor size */
{
	proc_t *callerp = ttoproc(caller);
	struct as *as = callerp->p_as;
	door_client_t *ct = DOOR_CLIENT(caller->t_door);
	caddr_t	addr;			/* Resulting address in target */
	size_t	rlen;			/* Rounded len */
	size_t	len;
	uint_t	i;
	size_t	ds = desc_num * sizeof (door_desc_t);

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);

	/* Do initial overflow check */
	if (!ufcanalloc(callerp, desc_num))
		return (EMFILE);

	/*
	 * Allocate space for this stuff in the callers address space
	 */
	rlen = roundup(data_size + ds, PAGESIZE);
	/* NOTE(review): reconstructed — as_rangelock/as_rangeunlock bracketing
	 * around the map was dropped by extraction; confirm */
	as_rangelock(as);
	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
	if (addr == NULL ||
	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
		/* No virtual memory available, or anon mapping failed */
		as_rangeunlock(as);
		if (!ct->d_kernel && desc_num > 0) {
			int error = door_release_fds(desc_ptr, desc_num);
			if (error)
				return (error);
		}
		return (EOVERFLOW);
	}
	as_rangeunlock(as);

	if (data_size != 0) {
		caddr_t	src = data_ptr;
		caddr_t saddr = addr;

		/* Copy the data, one page at a time (see door_copy). */
		len = data_size;
		while (len != 0) {
			int	amount;
			int	error;

			amount = len > PAGESIZE ? PAGESIZE : len;
			if ((error = door_copy(as, src, saddr, amount)) != 0) {
				(void) as_unmap(as, addr, rlen);
				return (error);
			}
			saddr += amount;
			src += amount;
			len -= amount;
		}
	}
	/* Copy and translate any descriptors. */
	if (desc_num != 0) {
		door_desc_t	*didpp, *start;
		struct file	**fpp;
		size_t		fpp_size;

		start = didpp = kmem_alloc(ds, KM_SLEEP);
		if (copyin_nowatch(desc_ptr, didpp, ds)) {
			kmem_free(start, ds);
			(void) as_unmap(as, addr, rlen);
			return (EFAULT);
		}

		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, ds);
				(void) as_unmap(as, addr, rlen);
				return (EINVAL);
			}
			/* Hold the file struct across releasef(). */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, ds);
	}

	/* Everything is copied; point the client's reply at the new area. */
	ct->d_args.rbuf = addr;
	ct->d_args.rsize = rlen;

	return (0);
}
/*
 * Transfer arguments from the client to the server.
 *
 * Small data payloads (<= door_max_arg) are staged through an
 * intermediate kernel buffer (2-copy); larger ones are copied directly
 * into the server's stack area (1-copy).  Descriptors are copied in and
 * translated into held file pointers in ct->d_fpp.
 */
static int
door_args(kthread_t *server, int is_private)
{
	door_server_t *st = DOOR_SERVER(server->t_door);
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t	ndid;
	size_t	dsize;
	int	error;

	ASSERT(DOOR_T_HELD(st));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);

	/*
	 * Get the stack layout, and fail now if it won't fit.
	 */
	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
	if (error != 0)
		return (error);

	dsize = ndid * sizeof (door_desc_t);
	if (ct->d_args.data_size != 0) {
		if (ct->d_args.data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 *
			 * Allocate a little more than we need for the
			 * args, in the hope that the results will fit
			 * without having to reallocate a buffer
			 */
			ASSERT(ct->d_buf == NULL);
			ct->d_bufsize = roundup(ct->d_args.data_size,
			    DOOR_ROUND);
			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
			if (copyin_nowatch(ct->d_args.data_ptr,
			    ct->d_buf, ct->d_args.data_size) != 0) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_buf = NULL;
				ct->d_bufsize = 0;
				return (EFAULT);
			}
		} else {
			struct as	*as;
			caddr_t		src;
			caddr_t		dest;
			uintptr_t	base;
			uintptr_t	start;
			uintptr_t	end;
			size_t		offset;
			size_t		amount;
			size_t		len = ct->d_args.data_size;

			/*
			 * Use a 1 copy method
			 */
			as = ttoproc(server)->p_as;
			src = ct->d_args.data_ptr;

			dest = st->d_layout.dl_datap;
			base = (uintptr_t)dest;

			/*
			 * Copy data directly into server. We proceed
			 * downward from the top of the stack, to mimic
			 * normal stack usage. This allows the guard page
			 * to stop us before we corrupt anything.
			 */
			end = base + len;
			while (len != 0) {
				/*
				 * Locate the next part to copy.
				 */
				start = P2ALIGN(end - 1, PAGESIZE);

				/*
				 * if we are on the final (first) page, fix
				 * up the start position.
				 */
				if (P2ALIGN(base, PAGESIZE) == start)
					start = base;

				offset = start - base;	/* the copy offset */
				amount = end - start;	/* # bytes to copy */

				ASSERT(amount > 0 && amount <= len &&
				    amount <= PAGESIZE);

				error = door_copy(as, src + offset,
				    dest + offset, amount);
				if (error != 0)
					return (error);
				len -= amount;
				end = start;
			}
		}
	}
	/*
	 * Copyin the door args and translate them into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp, *start;
		struct file	**fpp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		ct->d_fpp_size = ndid * sizeof (struct file *);
		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		fpp = ct->d_fpp;
		while (ndid--) {
			struct file *fp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* We only understand file descriptors as passed objs */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			/* Hold the file struct across releasef(). */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}
/*
 * Transfer arguments from a user client to a kernel server. This copies in
 * descriptors and translates them into door handles. It doesn't touch the
 * other data, letting the kernel server deal with that (to avoid needing
 * to copy the data twice).
 */
static int
door_translate_in(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t	ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc)
		return (E2BIG);
	/*
	 * Copyin the door args and translate them into door handles.
	 */
	if (ndid != 0) {
		door_desc_t	*didpp, *start;
		size_t		dsize = ndid * sizeof (door_desc_t);
		struct file	*fp;

		start = didpp = kmem_alloc(dsize, KM_SLEEP);

		if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		while (ndid--) {
			vnode_t	*vp;
			int fd = didpp->d_data.d_desc.d_descriptor;

			/*
			 * We only understand file descriptors as passed objs
			 */
			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
			    (fp = getf(fd)) != NULL) {
				didpp->d_data.d_handle = FTODH(fp);
				/* Hold the door */
				door_ki_hold(didpp->d_data.d_handle);

				/* NOTE(review): reconstructed — releasef(fd)
				 * was dropped by extraction; confirm */
				releasef(fd);

				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					(void) closeandsetf(fd, NULL);
				}

				if (fop_realvp(fp->f_vnode, &vp, NULL))
					vp = fp->f_vnode;

				/* Set attributes */
				didpp->d_attributes = DOOR_HANDLE |
				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
			} else {
				/* close translated references */
				door_fd_close(start, didpp - start);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}
			didpp++;
		}
		/* kernel server consumes the translated in-kernel array */
		ct->d_args.desc_ptr = start;
	}
	return (0);
}
/*
 * Translate door arguments from kernel to user. This copies the passed
 * door handles. It doesn't touch other data. It is used by door_upcall,
 * and for data returned by a door_call to a kernel server.
 */
static int
door_translate_out(void)
{
	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
	uint_t	ndid;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ndid = ct->d_args.desc_num;
	if (ndid > door_max_desc) {
		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
		return (E2BIG);
	}
	/*
	 * Translate the door args into files
	 */
	if (ndid != 0) {
		door_desc_t	*didpp = ct->d_args.desc_ptr;
		struct file	**fpp;

		ct->d_fpp_size = ndid * sizeof (struct file *);
		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
		while (ndid--) {
			struct file *fp = NULL;
			int fd = -1;

			/*
			 * We understand file descriptors and door
			 * handles as passed objs.
			 */
			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
				fd = didpp->d_data.d_desc.d_descriptor;
				fp = getf(fd);
			} else if (didpp->d_attributes & DOOR_HANDLE)
				fp = DHTOF(didpp->d_data.d_handle);
			if (fp != NULL) {
				/* Hold the file struct. */
				mutex_enter(&fp->f_tlock);
				fp->f_count++;
				mutex_exit(&fp->f_tlock);

				*fpp = fp;
				if (didpp->d_attributes & DOOR_DESCRIPTOR)
					releasef(fd);
				if (didpp->d_attributes & DOOR_RELEASE) {
					/* release passed reference */
					/* NOTE(review): reconstructed — the
					 * fd-vs-handle release split was
					 * dropped by extraction; confirm */
					if (fd >= 0)
						(void) closeandsetf(fd, NULL);
					else
						(void) closef(fp);
				}
			} else {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, ndid + 1, 1);
				kmem_free(ct->d_fpp, ct->d_fpp_size);
				ct->d_fpp = NULL;
				ct->d_fpp_size = 0;
				return (EINVAL);
			}
			fpp++; didpp++;
		}
	}
	return (0);
}
/*
 * Move the results from the server to the client.
 *
 * Handles three reply paths: upcall replies (kernel-allocated buffer),
 * oversized replies (door_overflow maps a fresh area in the client), and
 * in-place replies into the client's existing return buffer.  Descriptor
 * results are copied in and translated into held file pointers.
 */
static int
door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t desc_num)
{
	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
	door_upcall_t	*dup = ct->d_upcall;
	size_t		dsize;
	size_t		rlen;
	size_t		result_size;

	ASSERT(DOOR_T_HELD(ct));
	ASSERT(MUTEX_NOT_HELD(&door_knob));

	if (ct->d_noresults)
		return (E2BIG);		/* No results expected */

	if (desc_num > door_max_desc)
		return (E2BIG);		/* Too many descriptors */

	dsize = desc_num * sizeof (door_desc_t);
	/*
	 * Check if the results are bigger than the clients buffer
	 */
	if (dsize)
		rlen = roundup(data_size, sizeof (door_desc_t));
	else
		rlen = data_size;
	if ((result_size = rlen + dsize) == 0)
		return (0);		/* nothing to transfer */

	if (dup != NULL) {
		/* upcall reply: enforce the upcall's reply limits */
		if (desc_num > dup->du_max_descs)
			return (EMFILE);

		if (data_size > dup->du_max_data)
			return (E2BIG);

		if (ct->d_args.rbuf == NULL ||
		    ct->d_args.rsize < result_size) {
			/*
			 * If there's no return buffer or the buffer is too
			 * small, allocate a new one. The old buffer (if it
			 * exists) will be freed by the upcall client.
			 */
			if (result_size > door_max_upcall_reply)
				return (E2BIG);
			ct->d_args.rsize = result_size;
			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
		}
		ct->d_args.data_ptr = ct->d_args.rbuf;
		if (data_size != 0 &&
		    copyin_nowatch(data_ptr, ct->d_args.data_ptr,
		    data_size) != 0)
			return (EFAULT);
	} else if (result_size > ct->d_args.rsize) {
		return (door_overflow(caller, data_ptr, data_size,
		    desc_ptr, desc_num));
	} else if (data_size != 0) {
		if (data_size <= door_max_arg) {
			/*
			 * Use a 2 copy method for small amounts of data
			 */
			if (ct->d_buf == NULL) {
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize,
				    KM_SLEEP);
			} else if (ct->d_bufsize < data_size) {
				kmem_free(ct->d_buf, ct->d_bufsize);
				ct->d_bufsize = data_size;
				ct->d_buf = kmem_alloc(ct->d_bufsize,
				    KM_SLEEP);
			}
			if (copyin_nowatch(data_ptr, ct->d_buf,
			    data_size) != 0)
				return (EFAULT);
		} else {
			struct as *as = ttoproc(caller)->p_as;
			caddr_t	dest = ct->d_args.rbuf;
			caddr_t	src = data_ptr;
			size_t	len = data_size;

			/* Copy data directly into client */
			while (len != 0) {
				uint_t	amount;
				uint_t	max;
				uint_t	off;
				int	error;

				off = (uintptr_t)dest & PAGEOFFSET;
				if (off)
					max = PAGESIZE - off;
				else
					max = PAGESIZE;
				amount = len > max ? max : len;
				error = door_copy(as, src, dest, amount);
				if (error != 0)
					return (error);
				dest += amount;
				src += amount;
				len -= amount;
			}
		}
	}

	/*
	 * Copyin the returned door ids and translate them into door_node_t
	 */
	if (desc_num != 0) {
		door_desc_t	*didpp, *start;
		struct file	**fpp;
		size_t		fpp_size;
		uint_t		i;
		struct file	*fp;

		/* First, check if we would overflow client */
		if (!ufcanalloc(ttoproc(caller), desc_num))
			return (EMFILE);

		start = didpp = kmem_alloc(dsize, KM_SLEEP);
		if (copyin_nowatch(desc_ptr, didpp, dsize)) {
			kmem_free(start, dsize);
			return (EFAULT);
		}
		fpp_size = desc_num * sizeof (struct file *);
		if (fpp_size > ct->d_fpp_size) {
			/* make more space */
			if (ct->d_fpp_size)
				kmem_free(ct->d_fpp, ct->d_fpp_size);
			ct->d_fpp_size = fpp_size;
			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
		}
		fpp = ct->d_fpp;

		for (i = 0; i < desc_num; i++) {
			int fd = didpp->d_data.d_desc.d_descriptor;

			/* Only understand file descriptor results */
			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
			    (fp = getf(fd)) == NULL) {
				/* close translated references */
				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
				/* close untranslated references */
				door_fd_rele(didpp, desc_num - i, 0);
				kmem_free(start, dsize);
				return (EINVAL);
			}

			/* Hold the file struct across releasef(). */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);

			*fpp = fp;
			releasef(fd);

			if (didpp->d_attributes & DOOR_RELEASE) {
				/* release passed reference */
				(void) closeandsetf(fd, NULL);
			}

			fpp++; didpp++;
		}
		kmem_free(start, dsize);
	}
	return (0);
}
/*
 * Close all the descriptors.
 *
 * Walks 'n' door_desc_t entries, closing fd-type entries and releasing
 * door-handle-type entries.
 */
static void
door_fd_close(door_desc_t *d, uint_t n)
{
	uint_t	i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	for (i = 0; i < n; i++) {
		if (d->d_attributes & DOOR_DESCRIPTOR) {
			(void) closeandsetf(
			    d->d_data.d_desc.d_descriptor, NULL);
		} else if (d->d_attributes & DOOR_HANDLE) {
			door_ki_rele(d->d_data.d_handle);
		}
		d++;
	}
}
/*
 * Close descriptors that have the DOOR_RELEASE attribute set.
 *
 * Door handles are only released when called from kernel context
 * (from_kernel != 0), since user-supplied entries cannot carry handles.
 */
void
door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
{
	uint_t	i;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	for (i = 0; i < n; i++) {
		if (d->d_attributes & DOOR_RELEASE) {
			if (d->d_attributes & DOOR_DESCRIPTOR) {
				(void) closeandsetf(
				    d->d_data.d_desc.d_descriptor, NULL);
			} else if (from_kernel &&
			    (d->d_attributes & DOOR_HANDLE)) {
				door_ki_rele(d->d_data.d_handle);
			}
		}
		d++;
	}
}
/*
 * Copy descriptors into the kernel so we can release any marked
 * DOOR_RELEASE.
 *
 * Processes the user array in door_max_desc-sized batches to bound the
 * kernel allocation.  Returns 0 or EFAULT.
 */
static int
door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
{
	size_t		dsize;
	door_desc_t	*didpp;
	uint_t		desc_num;

	ASSERT(MUTEX_NOT_HELD(&door_knob));
	ASSERT(ndesc != 0);

	desc_num = MIN(ndesc, door_max_desc);

	dsize = desc_num * sizeof (door_desc_t);
	didpp = kmem_alloc(dsize, KM_SLEEP);

	while (ndesc > 0) {
		uint_t count = MIN(ndesc, desc_num);

		if (copyin_nowatch(desc_ptr, didpp,
		    count * sizeof (door_desc_t))) {
			kmem_free(didpp, dsize);
			return (EFAULT);
		}
		door_fd_rele(didpp, count, 0);

		ndesc -= count;
		desc_ptr += count;
	}
	kmem_free(didpp, dsize);
	return (0);
}
2956 * Decrement ref count on all the files passed
2959 door_fp_close(struct file
**fp
, uint_t n
)
2963 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2965 for (i
= 0; i
< n
; i
++)
2966 (void) closef(fp
[i
]);
2970 * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
2973 * Performs this using 1 mapin and 1 copy operation.
2975 * We really should do more than 1 page at a time to improve
2976 * performance, but for now this is treated as an anomalous condition.
2979 door_copy(struct as
*as
, caddr_t src
, caddr_t dest
, uint_t len
)
2988 ASSERT(len
<= PAGESIZE
);
2989 off
= (uintptr_t)dest
& PAGEOFFSET
; /* offset within the page */
2990 rdest
= (caddr_t
)((uintptr_t)dest
&
2991 (uintptr_t)PAGEMASK
); /* Page boundary */
2992 ASSERT(off
+ len
<= PAGESIZE
);
2995 * Lock down destination page.
2997 if (as_pagelock(as
, &pplist
, rdest
, PAGESIZE
, S_WRITE
))
3000 * Check if we have a shadow page list from as_pagelock. If not,
3001 * we took the slow path and have to find our page struct the hard
3004 if (pplist
== NULL
) {
3007 /* MMU mapping is already locked down */
3008 AS_LOCK_ENTER(as
, RW_READER
);
3009 pfnum
= hat_getpfnum(as
->a_hat
, rdest
);
3013 * TODO: The pfn step should not be necessary - need
3014 * a hat_getpp() function.
3016 if (pf_is_memory(pfnum
)) {
3017 pp
= page_numtopp_nolock(pfnum
);
3018 ASSERT(pp
== NULL
|| PAGE_LOCKED(pp
));
3022 as_pageunlock(as
, pplist
, rdest
, PAGESIZE
, S_WRITE
);
3029 * Map destination page into kernel address
3032 kaddr
= (caddr_t
)hat_kpm_mapin(pp
, NULL
);
3034 kaddr
= (caddr_t
)ppmapin(pp
, PROT_READ
| PROT_WRITE
,
3038 * Copy from src to dest
3040 if (copyin_nowatch(src
, kaddr
+ off
, len
) != 0)
3043 * Unmap destination page from kernel
3046 hat_kpm_mapout(pp
, NULL
, kaddr
);
3050 * Unlock destination page
3052 as_pageunlock(as
, pplist
, rdest
, PAGESIZE
, S_WRITE
);
3057 * General kernel upcall using doors
3058 * Returns 0 on success, errno for failures.
3059 * Caller must have a hold on the door based vnode, and on any
3060 * references passed in desc_ptr. The references are released
3061 * in the event of an error, and passed without duplication
3062 * otherwise. Note that param->rbuf must be 64-bit aligned in
3063 * a 64-bit kernel, since it may be used to store door descriptors
3064 * if they are returned by the server. The caller is responsible
3065 * for holding a reference to the cred passed in.
3068 door_upcall(vnode_t
*vp
, door_arg_t
*param
, struct cred
*cred
,
3069 size_t max_data
, uint_t max_descs
)
3074 kthread_t
*server_thread
;
3077 door_client_t
*ct
; /* curthread door_data */
3078 door_server_t
*st
; /* server thread door_data */
3082 if (vp
->v_type
!= VDOOR
) {
3083 if (param
->desc_num
)
3084 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3088 lwp
= ttolwp(curthread
);
3089 ct
= door_my_client(1);
3090 dp
= VTOD(vp
); /* Convert to a door_node_t */
3092 dup
= kmem_zalloc(sizeof (*dup
), KM_SLEEP
);
3093 dup
->du_cred
= (cred
!= NULL
) ? cred
: curthread
->t_cred
;
3094 dup
->du_max_data
= max_data
;
3095 dup
->du_max_descs
= max_descs
;
3098 * This should be done in shuttle_resume(), just before going to
3099 * sleep, but we want to avoid overhead while holding door_knob.
3100 * prstop() is just a no-op if we don't really go to sleep.
3101 * We test not-kernel-address-space for the sake of clustering code.
3103 if (lwp
&& lwp
->lwp_nostop
== 0 && curproc
->p_as
!= &kas
)
3104 prstop(PR_REQUESTED
, 0);
3106 mutex_enter(&door_knob
);
3107 if (DOOR_INVALID(dp
)) {
3108 mutex_exit(&door_knob
);
3109 if (param
->desc_num
)
3110 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3115 if (dp
->door_target
== &p0
) {
3116 /* Can't do an upcall to a kernel server */
3117 mutex_exit(&door_knob
);
3118 if (param
->desc_num
)
3119 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3124 error
= door_check_limits(dp
, param
, 1);
3126 mutex_exit(&door_knob
);
3127 if (param
->desc_num
)
3128 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3133 * Get a server thread from the target domain
3135 if ((server_thread
= door_get_server(dp
)) == NULL
) {
3136 if (DOOR_INVALID(dp
))
3140 mutex_exit(&door_knob
);
3141 if (param
->desc_num
)
3142 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3146 st
= DOOR_SERVER(server_thread
->t_door
);
3147 ct
->d_buf
= param
->data_ptr
;
3148 ct
->d_bufsize
= param
->data_size
;
3149 ct
->d_args
= *param
; /* structure assignment */
3151 if (ct
->d_args
.desc_num
) {
3153 * Move data from client to server
3156 mutex_exit(&door_knob
);
3157 error
= door_translate_out();
3158 mutex_enter(&door_knob
);
3162 * We're not going to resume this thread after all
3164 door_release_server(dp
, server_thread
);
3165 shuttle_sleep(server_thread
);
3166 mutex_exit(&door_knob
);
3172 if (param
->rsize
== 0)
3173 ct
->d_noresults
= 1;
3175 ct
->d_noresults
= 0;
3179 ct
->d_error
= DOOR_WAIT
;
3180 st
->d_caller
= curthread
;
3183 shuttle_resume(server_thread
, &door_knob
);
3185 mutex_enter(&door_knob
);
3187 if ((error
= ct
->d_error
) < 0) { /* DOOR_WAIT or DOOR_EXIT */
3189 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3191 mutex_exit(&door_knob
); /* May block in ISSIG */
3193 if (lwp
&& (ISSIG(curthread
, FORREAL
) || lwp
->lwp_sysabort
||
3194 MUSTRETURN(curproc
, curthread
) ||
3195 (cancel_pending
= schedctl_cancel_pending()) != 0)) {
3196 /* Signal, forkall, ... */
3198 schedctl_cancel_eintr();
3199 lwp
->lwp_sysabort
= 0;
3200 mutex_enter(&door_knob
);
3203 * If the server has finished processing our call,
3204 * or exited (calling door_slam()), then d_error
3205 * will have changed. If the server hasn't finished
3206 * yet, d_error will still be DOOR_WAIT, and we
3207 * let it know we are not interested in any
3208 * results by sending a SIGCANCEL, unless the door
3209 * is marked with DOOR_NO_CANCEL.
3211 if (ct
->d_error
== DOOR_WAIT
&&
3212 st
->d_caller
== curthread
) {
3213 proc_t
*p
= ttoproc(server_thread
);
3215 st
->d_active
= NULL
;
3216 st
->d_caller
= NULL
;
3217 if (!(dp
->door_flags
& DOOR_NO_CANCEL
)) {
3219 mutex_exit(&door_knob
);
3221 mutex_enter(&p
->p_lock
);
3222 sigtoproc(p
, server_thread
, SIGCANCEL
);
3223 mutex_exit(&p
->p_lock
);
3225 mutex_enter(&door_knob
);
3231 * Return from stop(), server exit...
3233 * Note that the server could have done a
3234 * door_return while the client was in stop state
3235 * (ISSIG), in which case the error condition
3236 * is updated by the server.
3238 mutex_enter(&door_knob
);
3239 if (ct
->d_error
== DOOR_WAIT
) {
3240 /* Still waiting for a reply */
3241 shuttle_swtch(&door_knob
);
3242 mutex_enter(&door_knob
);
3244 lwp
->lwp_asleep
= 0;
3245 goto shuttle_return
;
3246 } else if (ct
->d_error
== DOOR_EXIT
) {
3250 /* Server did a door_return during ISSIG */
3251 error
= ct
->d_error
;
3255 * Can't exit if the server is currently copying
3258 while (DOOR_T_HELD(ct
))
3259 cv_wait(&ct
->d_cv
, &door_knob
);
3262 * Find out if results were successfully copied.
3264 if (ct
->d_error
== 0)
3268 lwp
->lwp_asleep
= 0; /* /proc */
3269 lwp
->lwp_sysabort
= 0; /* /proc */
3271 if (--dp
->door_active
== 0 && (dp
->door_flags
& DOOR_DELAY
))
3272 door_deliver_unref(dp
);
3273 mutex_exit(&door_knob
);
3276 * Translate returned doors (if any)
3279 if (ct
->d_noresults
)
3284 * If server returned results successfully, then we've
3285 * been interrupted and may need to clean up.
3288 ASSERT(error
== EINTR
);
3289 door_fp_close(ct
->d_fpp
, ct
->d_args
.desc_num
);
3294 if (ct
->d_args
.desc_num
) {
3298 uint_t n
= ct
->d_args
.desc_num
;
3300 didpp
= ct
->d_args
.desc_ptr
= (door_desc_t
*)(ct
->d_args
.rbuf
+
3301 roundup(ct
->d_args
.data_size
, sizeof (door_desc_t
)));
3308 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3311 didpp
->d_attributes
= DOOR_HANDLE
|
3312 (VTOD(vp
)->door_flags
& DOOR_ATTR_MASK
);
3313 didpp
->d_data
.d_handle
= FTODH(fp
);
3319 /* on return data is in rbuf */
3320 *param
= ct
->d_args
; /* structure assignment */
3323 kmem_free(dup
, sizeof (*dup
));
3326 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
3331 ct
->d_upcall
= NULL
;
3332 ct
->d_noresults
= 0;
3339 * Add a door to the per-process list of active doors for which the
3340 * process is a server.
3343 door_list_insert(door_node_t
*dp
)
3345 proc_t
*p
= dp
->door_target
;
3347 ASSERT(MUTEX_HELD(&door_knob
));
3348 dp
->door_list
= p
->p_door_list
;
3349 p
->p_door_list
= dp
;
3353 * Remove a door from the per-process list of active doors.
3356 door_list_delete(door_node_t
*dp
)
3360 ASSERT(MUTEX_HELD(&door_knob
));
3362 * Find the door in the list. If the door belongs to another process,
3363 * it's OK to use p_door_list since that process can't exit until all
3364 * doors have been taken off the list (see door_exit).
3366 pp
= &(dp
->door_target
->p_door_list
);
3368 pp
= &((*pp
)->door_list
);
3370 /* found it, take it off the list */
3371 *pp
= dp
->door_list
;
3376 * External kernel interfaces for doors. These functions are available
3377 * outside the doorfs module for use in creating and using doors from
3378 * within the kernel.
3382 * door_ki_upcall invokes a user-level door server from the kernel, with
3383 * the credentials associated with curthread.
3386 door_ki_upcall(door_handle_t dh
, door_arg_t
*param
)
3388 return (door_ki_upcall_limited(dh
, param
, NULL
, SIZE_MAX
, UINT_MAX
));
3392 * door_ki_upcall_limited invokes a user-level door server from the
3393 * kernel with the given credentials and reply limits. If the "cred"
3394 * argument is NULL, uses the credentials associated with current
3395 * thread. max_data limits the maximum length of the returned data (the
3396 * client will get E2BIG if they go over), and max_desc limits the
3397 * number of returned descriptors (the client will get EMFILE if they
3401 door_ki_upcall_limited(door_handle_t dh
, door_arg_t
*param
, struct cred
*cred
,
3402 size_t max_data
, uint_t max_desc
)
3404 file_t
*fp
= DHTOF(dh
);
3407 if (fop_realvp(fp
->f_vnode
, &realvp
, NULL
))
3408 realvp
= fp
->f_vnode
;
3409 return (door_upcall(realvp
, param
, cred
, max_data
, max_desc
));
3413 * Function call to create a "kernel" door server. A kernel door
3414 * server provides a way for a user-level process to invoke a function
3415 * in the kernel through a door_call. From the caller's point of
3416 * view, a kernel door server looks the same as a user-level one
3417 * (except the server pid is 0). Unlike normal door calls, the
3418 * kernel door function is invoked via a normal function call in the
3419 * same thread and context as the caller.
3422 door_ki_create(void (*pc_cookie
)(), void *data_cookie
, uint_t attributes
,
3428 /* no DOOR_PRIVATE */
3429 if ((attributes
& ~DOOR_KI_CREATE_MASK
) ||
3430 (attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) ==
3431 (DOOR_UNREF
| DOOR_UNREF_MULTI
))
3434 err
= door_create_common(pc_cookie
, data_cookie
, attributes
,
3436 if (err
== 0 && (attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) &&
3437 p0
.p_unref_thread
== 0) {
3438 /* need to create unref thread for process 0 */
3439 (void) thread_create(NULL
, 0, door_unref_kernel
, NULL
, 0, &p0
,
3440 TS_RUN
, minclsyspri
);
3449 door_ki_hold(door_handle_t dh
)
3451 file_t
*fp
= DHTOF(dh
);
3453 mutex_enter(&fp
->f_tlock
);
3455 mutex_exit(&fp
->f_tlock
);
3459 door_ki_rele(door_handle_t dh
)
3461 file_t
*fp
= DHTOF(dh
);
3467 door_ki_open(char *pathname
, door_handle_t
*dhp
)
3473 if ((err
= lookupname(pathname
, UIO_SYSSPACE
, FOLLOW
, NULL
, &vp
)) != 0)
3475 if (err
= fop_open(&vp
, FREAD
, kcred
, NULL
)) {
3479 if (vp
->v_type
!= VDOOR
) {
3483 if ((err
= falloc(vp
, FREAD
| FWRITE
, &fp
, NULL
)) != 0) {
3487 /* falloc returns with f_tlock held on success */
3488 mutex_exit(&fp
->f_tlock
);
3494 door_ki_info(door_handle_t dh
, struct door_info
*dip
)
3496 file_t
*fp
= DHTOF(dh
);
3499 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3501 if (vp
->v_type
!= VDOOR
)
3503 door_info_common(VTOD(vp
), dip
, fp
);
3508 door_ki_lookup(int did
)
3513 /* is the descriptor really a door? */
3514 if (door_lookup(did
, &fp
) == NULL
)
3516 /* got the door, put a hold on it and release the fd */
3524 door_ki_setparam(door_handle_t dh
, int type
, size_t val
)
3526 file_t
*fp
= DHTOF(dh
);
3529 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3531 if (vp
->v_type
!= VDOOR
)
3533 return (door_setparam_common(VTOD(vp
), 1, type
, val
));
3537 door_ki_getparam(door_handle_t dh
, int type
, size_t *out
)
3539 file_t
*fp
= DHTOF(dh
);
3542 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3544 if (vp
->v_type
!= VDOOR
)
3546 return (door_getparam_common(VTOD(vp
), type
, out
));