4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
28 * System call I/F to doors (outside of vnodes I/F) and misc support
31 #include <sys/types.h>
32 #include <sys/systm.h>
34 #include <sys/door_data.h>
36 #include <sys/thread.h>
37 #include <sys/prsystm.h>
38 #include <sys/procfs.h>
39 #include <sys/class.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
50 #include <sys/vfs_opreg.h>
51 #include <sys/sobject.h>
52 #include <sys/schedctl.h>
53 #include <sys/callb.h>
54 #include <sys/ucred.h>
57 #include <sys/sysmacros.h>
58 #include <sys/vmsystm.h>
63 #include <vm/seg_vn.h>
64 #include <vm/seg_vn.h>
65 #include <vm/seg_kpm.h>
67 #include <sys/modctl.h>
68 #include <sys/syscall.h>
69 #include <sys/pathname.h>
73 * The maximum amount of data (in bytes) that will be transferred using
74 * an intermediate kernel buffer. For sizes greater than this we map
75 * in the destination pages and perform a 1-copy transfer.
77 size_t door_max_arg
= 16 * 1024;
80 * Maximum amount of data that will be transferred in a reply to a
81 * door_upcall. Need to guard against a process returning huge amounts
82 * of data and getting the kernel stuck in kmem_alloc.
84 size_t door_max_upcall_reply
= 1024 * 1024;
87 * Maximum number of descriptors allowed to be passed in a single
88 * door_call or door_return. We need to allocate kernel memory
89 * for all of them at once, so we can't let it scale without limit.
91 uint_t door_max_desc
= 1024;
94 * Definition of a door handle, used by other kernel subsystems when
95 * calling door functions. This is really a file structure but we
96 * want to hide that fact.
98 struct __door_handle
{
102 #define DHTOF(dh) ((file_t *)(dh))
103 #define FTODH(fp) ((door_handle_t)(fp))
105 static int doorfs(long, long, long, long, long, long);
107 static struct sysent door_sysent
= {
109 SE_ARGC
| SE_NOUNLOAD
,
113 static struct modlsys modlsys
= {
114 &mod_syscallops
, "doors", &door_sysent
117 #ifdef _SYSCALL32_IMPL
120 doorfs32(int32_t arg1
, int32_t arg2
, int32_t arg3
, int32_t arg4
,
121 int32_t arg5
, int32_t subcode
);
123 static struct sysent door_sysent32
= {
125 SE_ARGC
| SE_NOUNLOAD
,
129 static struct modlsys modlsys32
= {
131 "32-bit door syscalls",
136 static struct modlinkage modlinkage
= {
139 #ifdef _SYSCALL32_IMPL
147 extern struct vfs door_vfs
;
148 extern struct vnodeops
*door_vnodeops
;
153 static const fs_operation_def_t door_vfsops_template
[] = {
156 extern const fs_operation_def_t door_vnodeops_template
[];
157 vfsops_t
*door_vfsops
;
161 mutex_init(&door_knob
, NULL
, MUTEX_DEFAULT
, NULL
);
162 if ((major
= getudev()) == (major_t
)-1)
164 doordev
= makedevice(major
, 0);
166 /* Create a dummy vfs */
167 error
= vfs_makefsops(door_vfsops_template
, &door_vfsops
);
169 cmn_err(CE_WARN
, "door init: bad vfs ops");
172 VFS_INIT(&door_vfs
, door_vfsops
, NULL
);
173 door_vfs
.vfs_flag
= VFS_RDONLY
;
174 door_vfs
.vfs_dev
= doordev
;
175 vfs_make_fsid(&(door_vfs
.vfs_fsid
), doordev
, 0);
177 error
= vn_make_ops("doorfs", door_vnodeops_template
, &door_vnodeops
);
179 vfs_freevfsops(door_vfsops
);
180 cmn_err(CE_WARN
, "door init: bad vnode ops");
183 return (mod_install(&modlinkage
));
187 _info(struct modinfo
*modinfop
)
189 return (mod_info(&modlinkage
, modinfop
));
192 /* system call functions */
193 static int door_call(int, void *);
194 static int door_return(caddr_t
, size_t, door_desc_t
*, uint_t
, caddr_t
, size_t);
195 static int door_create(void (*pc_cookie
)(void *, char *, size_t, door_desc_t
*,
196 uint_t
), void *data_cookie
, uint_t
);
197 static int door_revoke(int);
198 static int door_info(int, struct door_info
*);
199 static int door_ucred(struct ucred_s
*);
200 static int door_bind(int);
201 static int door_unbind(void);
202 static int door_unref(void);
203 static int door_getparam(int, int, size_t *);
204 static int door_setparam(int, int, size_t);
206 #define DOOR_RETURN_OLD 4 /* historic value, for s10 */
209 * System call wrapper for all door related system calls
212 doorfs(long arg1
, long arg2
, long arg3
, long arg4
, long arg5
, long subcode
)
216 return (door_call(arg1
, (void *)arg2
));
218 door_return_desc_t
*drdp
= (door_return_desc_t
*)arg3
;
221 door_return_desc_t drd
;
222 if (copyin(drdp
, &drd
, sizeof (drd
)))
224 return (door_return((caddr_t
)arg1
, arg2
, drd
.desc_ptr
,
225 drd
.desc_num
, (caddr_t
)arg4
, arg5
));
227 return (door_return((caddr_t
)arg1
, arg2
, NULL
,
228 0, (caddr_t
)arg4
, arg5
));
230 case DOOR_RETURN_OLD
:
232 * In order to support the S10 runtime environment, we
233 * still respond to the old syscall subcode for door_return.
234 * We treat it as having no stack limits. This code should
235 * be removed when such support is no longer needed.
237 return (door_return((caddr_t
)arg1
, arg2
, (door_desc_t
*)arg3
,
238 arg4
, (caddr_t
)arg5
, 0));
240 return (door_create((void (*)())arg1
, (void *)arg2
, arg3
));
242 return (door_revoke(arg1
));
244 return (door_info(arg1
, (struct door_info
*)arg2
));
246 return (door_bind(arg1
));
248 return (door_unbind());
250 return (door_unref());
252 return (door_ucred((struct ucred_s
*)arg1
));
254 return (door_getparam(arg1
, arg2
, (size_t *)arg3
));
256 return (door_setparam(arg1
, arg2
, arg3
));
258 return (set_errno(EINVAL
));
262 #ifdef _SYSCALL32_IMPL
264 * System call wrapper for all door related system calls from 32-bit programs.
265 * Needed at the moment because of the casts - they undo some damage
266 * that truss causes (sign-extending the stack pointer) when truss'ing
267 * a 32-bit program using doors.
270 doorfs32(int32_t arg1
, int32_t arg2
, int32_t arg3
,
271 int32_t arg4
, int32_t arg5
, int32_t subcode
)
275 return (door_call(arg1
, (void *)(uintptr_t)(caddr32_t
)arg2
));
277 door_return_desc32_t
*drdp
=
278 (door_return_desc32_t
*)(uintptr_t)(caddr32_t
)arg3
;
280 door_return_desc32_t drd
;
281 if (copyin(drdp
, &drd
, sizeof (drd
)))
284 (caddr_t
)(uintptr_t)(caddr32_t
)arg1
, arg2
,
285 (door_desc_t
*)(uintptr_t)drd
.desc_ptr
,
286 drd
.desc_num
, (caddr_t
)(uintptr_t)(caddr32_t
)arg4
,
287 (size_t)(uintptr_t)(size32_t
)arg5
));
289 return (door_return((caddr_t
)(uintptr_t)(caddr32_t
)arg1
,
290 arg2
, NULL
, 0, (caddr_t
)(uintptr_t)(caddr32_t
)arg4
,
291 (size_t)(uintptr_t)(size32_t
)arg5
));
293 case DOOR_RETURN_OLD
:
295 * In order to support the S10 runtime environment, we
296 * still respond to the old syscall subcode for door_return.
297 * We treat it as having no stack limits. This code should
298 * be removed when such support is no longer needed.
300 return (door_return((caddr_t
)(uintptr_t)(caddr32_t
)arg1
, arg2
,
301 (door_desc_t
*)(uintptr_t)(caddr32_t
)arg3
, arg4
,
302 (caddr_t
)(uintptr_t)(caddr32_t
)arg5
, 0));
304 return (door_create((void (*)())(uintptr_t)(caddr32_t
)arg1
,
305 (void *)(uintptr_t)(caddr32_t
)arg2
, arg3
));
307 return (door_revoke(arg1
));
309 return (door_info(arg1
,
310 (struct door_info
*)(uintptr_t)(caddr32_t
)arg2
));
312 return (door_bind(arg1
));
314 return (door_unbind());
316 return (door_unref());
319 (struct ucred_s
*)(uintptr_t)(caddr32_t
)arg1
));
321 return (door_getparam(arg1
, arg2
,
322 (size_t *)(uintptr_t)(caddr32_t
)arg3
));
324 return (door_setparam(arg1
, arg2
, (size_t)(size32_t
)arg3
));
327 return (set_errno(EINVAL
));
332 void shuttle_resume(kthread_t
*, kmutex_t
*);
333 void shuttle_swtch(kmutex_t
*);
334 void shuttle_sleep(kthread_t
*);
339 static int door_create_common(void (*)(), void *, uint_t
, int, int *,
341 static int door_overflow(kthread_t
*, caddr_t
, size_t, door_desc_t
*, uint_t
);
342 static int door_args(kthread_t
*, int);
343 static int door_results(kthread_t
*, caddr_t
, size_t, door_desc_t
*, uint_t
);
344 static int door_copy(struct as
*, caddr_t
, caddr_t
, uint_t
);
345 static void door_server_exit(proc_t
*, kthread_t
*);
346 static void door_release_server(door_node_t
*, kthread_t
*);
347 static kthread_t
*door_get_server(door_node_t
*);
348 static door_node_t
*door_lookup(int, file_t
**);
349 static int door_translate_in(void);
350 static int door_translate_out(void);
351 static void door_fd_rele(door_desc_t
*, uint_t
, int);
352 static void door_list_insert(door_node_t
*);
353 static void door_info_common(door_node_t
*, door_info_t
*, file_t
*);
354 static int door_release_fds(door_desc_t
*, uint_t
);
355 static void door_fd_close(door_desc_t
*, uint_t
);
356 static void door_fp_close(struct file
**, uint_t
);
/*
 * Return the calling thread's per-thread door data (curthread->t_door),
 * optionally allocating it on first use.
 * NOTE(review): this extraction is missing the function's return type,
 * braces, the declaration of ddp, and the return statement — confirm
 * against the full source.
 */
359 door_my_data(int create_if_missing
)
363 ddp
= curthread
->t_door
;
/* Lazily allocate; KM_SLEEP allocation cannot fail, so ddp is non-NULL after. */
364 if (create_if_missing
&& ddp
== NULL
)
365 ddp
= curthread
->t_door
= kmem_zalloc(sizeof (*ddp
), KM_SLEEP
);
/*
 * Return the server-side view of the calling thread's door data, or NULL
 * when no door data exists and create_if_missing is 0.
 * NOTE(review): the surrounding braces are missing from this extraction.
 */
370 static door_server_t
*
371 door_my_server(int create_if_missing
)
373 door_data_t
*ddp
= door_my_data(create_if_missing
);
375 return ((ddp
!= NULL
)? DOOR_SERVER(ddp
) : NULL
);
/*
 * Return the client-side view of the calling thread's door data, or NULL
 * when no door data exists and create_if_missing is 0.
 * NOTE(review): the surrounding braces are missing from this extraction.
 */
378 static door_client_t
*
379 door_my_client(int create_if_missing
)
381 door_data_t
*ddp
= door_my_data(create_if_missing
);
383 return ((ddp
!= NULL
)? DOOR_CLIENT(ddp
) : NULL
);
387 * System call to create a door
390 door_create(void (*pc_cookie
)(), void *data_cookie
, uint_t attributes
)
395 if ((attributes
& ~DOOR_CREATE_MASK
) ||
396 ((attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) ==
397 (DOOR_UNREF
| DOOR_UNREF_MULTI
)))
398 return (set_errno(EINVAL
));
400 if ((err
= door_create_common(pc_cookie
, data_cookie
, attributes
, 0,
402 return (set_errno(err
));
404 f_setfd(fd
, FD_CLOEXEC
);
409 * Common code for creating user and kernel doors. If a door was
410 * created, stores a file structure pointer in the location pointed
411 * to by fpp (if fpp is non-NULL) and returns 0. Also, if a non-NULL
412 * pointer to a file descriptor is passed in as fdp, allocates a file
413 * descriptor representing the door. If a door could not be created,
417 door_create_common(void (*pc_cookie
)(), void *data_cookie
, uint_t attributes
,
418 int from_kernel
, int *fdp
, file_t
**fpp
)
423 static door_id_t index
= 0;
424 proc_t
*p
= (from_kernel
)? &p0
: curproc
;
426 dp
= kmem_zalloc(sizeof (door_node_t
), KM_SLEEP
);
428 dp
->door_vnode
= vn_alloc(KM_SLEEP
);
430 dp
->door_data
= data_cookie
;
431 dp
->door_pc
= pc_cookie
;
432 dp
->door_flags
= attributes
;
433 #ifdef _SYSCALL32_IMPL
434 if (!from_kernel
&& get_udatamodel() != DATAMODEL_NATIVE
)
435 dp
->door_data_max
= UINT32_MAX
;
438 dp
->door_data_max
= SIZE_MAX
;
439 dp
->door_data_min
= 0UL;
440 dp
->door_desc_max
= (attributes
& DOOR_REFUSE_DESC
)? 0 : INT_MAX
;
443 vn_setops(vp
, door_vnodeops
);
445 vp
->v_vfsp
= &door_vfs
;
446 vp
->v_data
= (caddr_t
)dp
;
447 mutex_enter(&door_knob
);
448 dp
->door_index
= index
++;
449 /* add to per-process door list */
450 door_list_insert(dp
);
451 mutex_exit(&door_knob
);
453 if (falloc(vp
, FREAD
| FWRITE
, &fp
, fdp
)) {
455 * If the file table is full, remove the door from the
456 * per-process list, free the door, and return NULL.
458 mutex_enter(&door_knob
);
459 door_list_delete(dp
);
460 mutex_exit(&door_knob
);
462 kmem_free(dp
, sizeof (door_node_t
));
468 mutex_exit(&fp
->f_tlock
);
/*
 * Validate a door_arg_t against the door's configured size limits
 * (door_data_min/door_data_max/door_desc_max and DOOR_REFUSE_DESC).
 * Caller must hold door_knob.
 * NOTE(review): the return type, braces, and the E2BIG/ENOBUFS/ENOTSUP
 * return statements for each failed check are missing from this
 * extraction — confirm against the full source.
 */
476 door_check_limits(door_node_t
*dp
, door_arg_t
*da
, int upcall
)
478 ASSERT(MUTEX_HELD(&door_knob
));
480 /* we allow unref upcalls through, despite any minimum */
481 if (da
->data_size
< dp
->door_data_min
&&
482 !(upcall
&& da
->data_ptr
== DOOR_UNREF_DATA
))
/* Too much data for this door. */
485 if (da
->data_size
> dp
->door_data_max
)
/* Door was created with DOOR_REFUSE_DESC: no descriptors allowed at all. */
488 if (da
->desc_num
> 0 && (dp
->door_flags
& DOOR_REFUSE_DESC
))
/* More descriptors than this door permits. */
491 if (da
->desc_num
> dp
->door_desc_max
)
501 door_call(int did
, void *args
)
505 kthread_t
*server_thread
;
508 door_client_t
*ct
; /* curthread door_data */
509 door_server_t
*st
; /* server thread door_data */
510 door_desc_t
*start
= NULL
;
513 /* destructor for data returned by a kernel server */
514 void (*destfn
)() = NULL
;
521 lwp
= ttolwp(curthread
);
522 datamodel
= lwp_getdatamodel(lwp
);
524 ct
= door_my_client(1);
530 if (datamodel
== DATAMODEL_NATIVE
) {
531 if (copyin(args
, &ct
->d_args
, sizeof (door_arg_t
)) != 0)
532 return (set_errno(EFAULT
));
536 if (copyin(args
, &da32
, sizeof (door_arg32_t
)) != 0)
537 return (set_errno(EFAULT
));
538 ct
->d_args
.data_ptr
=
539 (char *)(uintptr_t)da32
.data_ptr
;
540 ct
->d_args
.data_size
= da32
.data_size
;
541 ct
->d_args
.desc_ptr
=
542 (door_desc_t
*)(uintptr_t)da32
.desc_ptr
;
543 ct
->d_args
.desc_num
= da32
.desc_num
;
545 (char *)(uintptr_t)da32
.rbuf
;
546 ct
->d_args
.rsize
= da32
.rsize
;
549 /* No arguments, and no results allowed */
551 ct
->d_args
.data_size
= 0;
552 ct
->d_args
.desc_num
= 0;
553 ct
->d_args
.rsize
= 0;
556 if ((dp
= door_lookup(did
, NULL
)) == NULL
)
557 return (set_errno(EBADF
));
560 * We don't want to hold the door FD over the entire operation;
561 * instead, we put a hold on the door vnode and release the FD
568 * This should be done in shuttle_resume(), just before going to
569 * sleep, but we want to avoid overhead while holding door_knob.
570 * prstop() is just a no-op if we don't really go to sleep.
571 * We test not-kernel-address-space for the sake of clustering code.
573 if (lwp
&& lwp
->lwp_nostop
== 0 && curproc
->p_as
!= &kas
)
574 prstop(PR_REQUESTED
, 0);
576 mutex_enter(&door_knob
);
577 if (DOOR_INVALID(dp
)) {
578 mutex_exit(&door_knob
);
584 * before we do anything, check that we are not overflowing the
587 error
= door_check_limits(dp
, &ct
->d_args
, 0);
589 mutex_exit(&door_knob
);
594 * Check for in-kernel door server.
596 if (dp
->door_target
== &p0
) {
597 caddr_t rbuf
= ct
->d_args
.rbuf
;
598 size_t rsize
= ct
->d_args
.rsize
;
602 ct
->d_error
= DOOR_WAIT
;
603 mutex_exit(&door_knob
);
604 /* translate file descriptors to vnodes */
605 if (ct
->d_args
.desc_num
) {
606 error
= door_translate_in();
611 * Call kernel door server. Arguments are passed and
612 * returned as a door_arg pointer. When called, data_ptr
613 * points to user data and desc_ptr points to a kernel list
614 * of door descriptors that have been converted to file
615 * structure pointers. It's the server function's
616 * responsibility to copyin the data pointed to by data_ptr
617 * (this avoids extra copying in some cases). On return,
618 * data_ptr points to a user buffer of data, and desc_ptr
619 * points to a kernel list of door descriptors representing
620 * files. When a reference is passed to a kernel server,
621 * it is the server's responsibility to release the reference
622 * (by calling closef). When the server includes a
623 * reference in its reply, it is released as part of
624 * the call (the server must duplicate the reference if
625 * it wants to retain a copy). The destfn, if set to
626 * non-NULL, is a destructor to be called when the returned
627 * kernel data (if any) is no longer needed (has all been
628 * translated and copied to user level).
630 (*(dp
->door_pc
))(dp
->door_data
, &ct
->d_args
,
631 &destfn
, &destarg
, &error
);
632 mutex_enter(&door_knob
);
633 /* not implemented yet */
634 if (--dp
->door_active
== 0 && (dp
->door_flags
& DOOR_DELAY
))
635 door_deliver_unref(dp
);
636 mutex_exit(&door_knob
);
640 /* translate vnodes to files */
641 if (ct
->d_args
.desc_num
) {
642 error
= door_translate_out();
646 ct
->d_buf
= ct
->d_args
.rbuf
;
647 ct
->d_bufsize
= ct
->d_args
.rsize
;
648 if (rsize
< (ct
->d_args
.data_size
+
649 (ct
->d_args
.desc_num
* sizeof (door_desc_t
)))) {
650 /* handle overflow */
651 error
= door_overflow(curthread
, ct
->d_args
.data_ptr
,
652 ct
->d_args
.data_size
, ct
->d_args
.desc_ptr
,
653 ct
->d_args
.desc_num
);
656 /* door_overflow sets d_args rbuf and rsize */
658 ct
->d_args
.rbuf
= rbuf
;
659 ct
->d_args
.rsize
= rsize
;
665 * Get a server thread from the target domain
667 if ((server_thread
= door_get_server(dp
)) == NULL
) {
668 if (DOOR_INVALID(dp
))
672 mutex_exit(&door_knob
);
676 st
= DOOR_SERVER(server_thread
->t_door
);
677 if (ct
->d_args
.desc_num
|| ct
->d_args
.data_size
) {
678 int is_private
= (dp
->door_flags
& DOOR_PRIVATE
);
680 * Move data from client to server
683 mutex_exit(&door_knob
);
684 error
= door_args(server_thread
, is_private
);
685 mutex_enter(&door_knob
);
689 * We're not going to resume this thread after all
691 door_release_server(dp
, server_thread
);
692 shuttle_sleep(server_thread
);
693 mutex_exit(&door_knob
);
699 ct
->d_error
= DOOR_WAIT
;
701 st
->d_caller
= curthread
;
704 shuttle_resume(server_thread
, &door_knob
);
706 mutex_enter(&door_knob
);
708 if ((error
= ct
->d_error
) < 0) { /* DOOR_WAIT or DOOR_EXIT */
710 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
712 mutex_exit(&door_knob
); /* May block in ISSIG */
714 if (ISSIG(curthread
, FORREAL
) || lwp
->lwp_sysabort
||
715 MUSTRETURN(curproc
, curthread
) ||
716 (cancel_pending
= schedctl_cancel_pending()) != 0) {
717 /* Signal, forkall, ... */
718 lwp
->lwp_sysabort
= 0;
720 schedctl_cancel_eintr();
721 mutex_enter(&door_knob
);
724 * If the server has finished processing our call,
725 * or exited (calling door_slam()), then d_error
726 * will have changed. If the server hasn't finished
727 * yet, d_error will still be DOOR_WAIT, and we
728 * let it know we are not interested in any
729 * results by sending a SIGCANCEL, unless the door
730 * is marked with DOOR_NO_CANCEL.
732 if (ct
->d_error
== DOOR_WAIT
&&
733 st
->d_caller
== curthread
) {
734 proc_t
*p
= ttoproc(server_thread
);
739 if (!(dp
->door_flags
& DOOR_NO_CANCEL
)) {
741 mutex_exit(&door_knob
);
743 mutex_enter(&p
->p_lock
);
744 sigtoproc(p
, server_thread
, SIGCANCEL
);
745 mutex_exit(&p
->p_lock
);
747 mutex_enter(&door_knob
);
753 * Return from stop(), server exit...
755 * Note that the server could have done a
756 * door_return while the client was in stop state
757 * (ISSIG), in which case the error condition
758 * is updated by the server.
760 mutex_enter(&door_knob
);
761 if (ct
->d_error
== DOOR_WAIT
) {
762 /* Still waiting for a reply */
763 shuttle_swtch(&door_knob
);
764 mutex_enter(&door_knob
);
767 } else if (ct
->d_error
== DOOR_EXIT
) {
771 /* Server did a door_return during ISSIG */
776 * Can't exit if the server is currently copying
779 while (DOOR_T_HELD(ct
))
780 cv_wait(&ct
->d_cv
, &door_knob
);
783 * If the server has not processed our message, free the
786 if (!ct
->d_args_done
) {
792 * Find out if results were successfully copied.
794 if (ct
->d_error
== 0)
797 ASSERT(ct
->d_args_done
);
798 lwp
->lwp_asleep
= 0; /* /proc */
799 lwp
->lwp_sysabort
= 0; /* /proc */
800 if (--dp
->door_active
== 0 && (dp
->door_flags
& DOOR_DELAY
))
801 door_deliver_unref(dp
);
802 mutex_exit(&door_knob
);
805 door_fp_close(ct
->d_fpp
, ct
->d_args
.desc_num
);
809 * Move the results to userland (if any)
817 * If server returned results successfully, then we've
818 * been interrupted and may need to clean up.
821 ASSERT(error
== EINTR
);
822 door_fp_close(ct
->d_fpp
, ct
->d_args
.desc_num
);
828 * Copy back data if we haven't caused an overflow (already
829 * handled) and we are using a 2 copy transfer, or we are
830 * returning data from a kernel server.
832 if (ct
->d_args
.data_size
) {
833 ct
->d_args
.data_ptr
= ct
->d_args
.rbuf
;
834 if (ct
->d_kernel
|| (!ct
->d_overflow
&&
835 ct
->d_args
.data_size
<= door_max_arg
)) {
836 if (copyout_nowatch(ct
->d_buf
, ct
->d_args
.rbuf
,
837 ct
->d_args
.data_size
)) {
838 door_fp_close(ct
->d_fpp
, ct
->d_args
.desc_num
);
846 * stuff returned doors into our proc, copyout the descriptors
848 if (ct
->d_args
.desc_num
) {
851 uint_t n
= ct
->d_args
.desc_num
;
853 dsize
= n
* sizeof (door_desc_t
);
854 start
= didpp
= kmem_alloc(dsize
, KM_SLEEP
);
858 if (door_insert(*fpp
, didpp
) == -1) {
859 /* Close remaining files */
860 door_fp_close(fpp
, n
+ 1);
864 fpp
++; didpp
++; ncopied
++;
867 ct
->d_args
.desc_ptr
= (door_desc_t
*)(ct
->d_args
.rbuf
+
868 roundup(ct
->d_args
.data_size
, sizeof (door_desc_t
)));
870 if (copyout_nowatch(start
, ct
->d_args
.desc_ptr
, dsize
)) {
879 if (datamodel
== DATAMODEL_NATIVE
) {
880 if (copyout_nowatch(&ct
->d_args
, args
,
881 sizeof (door_arg_t
)) != 0)
886 da32
.data_ptr
= (caddr32_t
)(uintptr_t)ct
->d_args
.data_ptr
;
887 da32
.data_size
= ct
->d_args
.data_size
;
888 da32
.desc_ptr
= (caddr32_t
)(uintptr_t)ct
->d_args
.desc_ptr
;
889 da32
.desc_num
= ct
->d_args
.desc_num
;
890 da32
.rbuf
= (caddr32_t
)(uintptr_t)ct
->d_args
.rbuf
;
891 da32
.rsize
= ct
->d_args
.rsize
;
892 if (copyout_nowatch(&da32
, args
, sizeof (door_arg32_t
)) != 0) {
900 /* clean up the overflow buffer if an error occurred */
901 if (error
!= 0 && ct
->d_overflow
) {
902 (void) as_unmap(curproc
->p_as
, ct
->d_args
.rbuf
,
907 /* call destructor */
909 ASSERT(ct
->d_kernel
);
910 (*destfn
)(dp
->door_data
, destarg
);
919 ASSERT(!ct
->d_kernel
);
920 kmem_free(ct
->d_buf
, ct
->d_bufsize
);
926 /* clean up the descriptor copyout buffer */
929 door_fd_close(start
, ncopied
);
930 kmem_free(start
, dsize
);
934 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
940 return (set_errno(error
));
946 door_setparam_common(door_node_t
*dp
, int from_kernel
, int type
, size_t val
)
950 mutex_enter(&door_knob
);
952 if (DOOR_INVALID(dp
)) {
953 mutex_exit(&door_knob
);
958 * door_ki_setparam() can only affect kernel doors.
959 * door_setparam() can only affect doors attached to the current
962 if ((from_kernel
&& dp
->door_target
!= &p0
) ||
963 (!from_kernel
&& dp
->door_target
!= curproc
)) {
964 mutex_exit(&door_knob
);
969 case DOOR_PARAM_DESC_MAX
:
972 else if ((dp
->door_flags
& DOOR_REFUSE_DESC
) && val
!= 0)
975 dp
->door_desc_max
= (uint_t
)val
;
978 case DOOR_PARAM_DATA_MIN
:
979 if (val
> dp
->door_data_max
)
982 dp
->door_data_min
= val
;
985 case DOOR_PARAM_DATA_MAX
:
986 if (val
< dp
->door_data_min
)
989 dp
->door_data_max
= val
;
997 mutex_exit(&door_knob
);
/*
 * Read one of a door's tunable parameters (descriptor max, data min,
 * data max) into *out, under door_knob.
 * NOTE(review): the switch(type) header, break statements, and the
 * default (EINVAL) case are missing from this extraction — confirm
 * against the full source.
 */
1002 door_getparam_common(door_node_t
*dp
, int type
, size_t *out
)
1006 mutex_enter(&door_knob
);
1008 case DOOR_PARAM_DESC_MAX
:
1009 *out
= (size_t)dp
->door_desc_max
;
1011 case DOOR_PARAM_DATA_MIN
:
1012 *out
= dp
->door_data_min
;
1014 case DOOR_PARAM_DATA_MAX
:
1015 *out
= dp
->door_data_max
;
1021 mutex_exit(&door_knob
);
1026 door_setparam(int did
, int type
, size_t val
)
1031 if ((dp
= door_lookup(did
, NULL
)) == NULL
)
1032 return (set_errno(EBADF
));
1034 error
= door_setparam_common(dp
, 0, type
, val
);
1039 return (set_errno(error
));
1045 door_getparam(int did
, int type
, size_t *out
)
1051 if ((dp
= door_lookup(did
, NULL
)) == NULL
)
1052 return (set_errno(EBADF
));
1054 error
= door_getparam_common(dp
, type
, &val
);
1059 return (set_errno(error
));
1061 if (get_udatamodel() == DATAMODEL_NATIVE
) {
1062 if (copyout(&val
, out
, sizeof (val
)))
1063 return (set_errno(EFAULT
));
1064 #ifdef _SYSCALL32_IMPL
1066 size32_t val32
= (size32_t
)val
;
1069 return (set_errno(EOVERFLOW
));
1071 if (copyout(&val32
, out
, sizeof (val32
)))
1072 return (set_errno(EFAULT
));
1073 #endif /* _SYSCALL32_IMPL */
1080 * A copyout() which proceeds from high addresses to low addresses. This way,
1081 * stack guard pages are effective.
1083 * Note that we use copyout_nowatch(); this is called while the client is
1087 door_stack_copyout(const void *kaddr
, void *uaddr
, size_t count
)
1089 const char *kbase
= (const char *)kaddr
;
1090 uintptr_t ubase
= (uintptr_t)uaddr
;
1091 size_t pgsize
= PAGESIZE
;
1093 if (count
<= pgsize
)
1094 return (copyout_nowatch(kaddr
, uaddr
, count
));
1097 uintptr_t start
, end
, offset
, amount
;
1099 end
= ubase
+ count
;
1100 start
= P2ALIGN(end
- 1, pgsize
);
1101 if (P2ALIGN(ubase
, pgsize
) == start
)
1104 offset
= start
- ubase
;
1105 amount
= end
- start
;
1107 ASSERT(amount
> 0 && amount
<= count
&& amount
<= pgsize
);
1109 if (copyout_nowatch(kbase
+ offset
, (void *)start
, amount
))
1117 * Writes the stack layout for door_return() into the door_server_t of the
1121 door_layout(kthread_t
*tp
, size_t data_size
, uint_t ndesc
, int info_needed
)
1123 door_server_t
*st
= DOOR_SERVER(tp
->t_door
);
1124 door_layout_t
*out
= &st
->d_layout
;
1125 uintptr_t base_sp
= (uintptr_t)st
->d_sp
;
1126 size_t ssize
= st
->d_ssize
;
1128 uintptr_t descp
, datap
, infop
, resultsp
, finalsp
;
1129 size_t align
= STACK_ALIGN
;
1130 size_t results_sz
= sizeof (struct door_results
);
1131 model_t datamodel
= lwp_getdatamodel(ttolwp(tp
));
1133 ASSERT(!st
->d_layout_done
);
1135 #ifndef _STACK_GROWS_DOWNWARD
1136 #error stack does not grow downward, door_layout() must change
1139 #ifdef _SYSCALL32_IMPL
1140 if (datamodel
!= DATAMODEL_NATIVE
) {
1141 align
= STACK_ALIGN32
;
1142 results_sz
= sizeof (struct door_results32
);
1146 descsz
= ndesc
* sizeof (door_desc_t
);
1149 * To speed up the overflow checking, we do an initial check
1150 * that the passed in data size won't cause us to wrap past
1151 * base_sp. Since door_max_desc limits descsz, we can
1152 * safely use it here. 65535 is an arbitrary 'bigger than
1153 * we need, small enough to not cause trouble' constant;
1154 * the only constraint is that it must be > than:
1157 * sizeof (door_info_t) +
1158 * sizeof (door_results_t) +
1159 * (max adjustment from door_final_sp())
1161 * After we compute the layout, we can safely do a "did we wrap
1162 * around" check, followed by a check against the recorded
1165 if (data_size
>= SIZE_MAX
- (size_t)65535UL - descsz
)
1166 return (E2BIG
); /* overflow */
1168 descp
= P2ALIGN(base_sp
- descsz
, align
);
1169 datap
= P2ALIGN(descp
- data_size
, align
);
1172 infop
= P2ALIGN(datap
- sizeof (door_info_t
), align
);
1176 resultsp
= P2ALIGN(infop
- results_sz
, align
);
1177 finalsp
= door_final_sp(resultsp
, align
, datamodel
);
1179 if (finalsp
> base_sp
)
1180 return (E2BIG
); /* overflow */
1182 if (ssize
!= 0 && (base_sp
- finalsp
) > ssize
)
1183 return (E2BIG
); /* doesn't fit in stack */
1185 out
->dl_descp
= (ndesc
!= 0)? (caddr_t
)descp
: 0;
1186 out
->dl_datap
= (data_size
!= 0)? (caddr_t
)datap
: 0;
1187 out
->dl_infop
= info_needed
? (caddr_t
)infop
: 0;
1188 out
->dl_resultsp
= (caddr_t
)resultsp
;
1189 out
->dl_sp
= (caddr_t
)finalsp
;
1191 st
->d_layout_done
= 1;
1196 door_server_dispatch(door_client_t
*ct
, door_node_t
*dp
)
1198 door_server_t
*st
= DOOR_SERVER(curthread
->t_door
);
1199 door_layout_t
*layout
= &st
->d_layout
;
1202 int is_private
= (dp
->door_flags
& DOOR_PRIVATE
);
1204 door_pool_t
*pool
= (is_private
)? &dp
->door_servers
:
1205 &curproc
->p_server_threads
;
1207 int empty_pool
= (pool
->dp_threads
== NULL
);
1209 caddr_t infop
= NULL
;
1211 size_t datasize
= 0;
1214 file_t
**fpp
= ct
->d_fpp
;
1215 door_desc_t
*start
= NULL
;
1220 datap
= ct
->d_args
.data_ptr
;
1221 datasize
= ct
->d_args
.data_size
;
1222 ndesc
= ct
->d_args
.desc_num
;
1225 descsize
= ndesc
* sizeof (door_desc_t
);
1228 * Reset datap to NULL if we aren't passing any data. Be careful
1229 * to let unref notifications through, though.
1231 if (datap
== DOOR_UNREF_DATA
) {
1232 if (ct
->d_upcall
!= NULL
)
1236 } else if (datasize
== 0) {
1241 * Get the stack layout, if it hasn't already been done.
1243 if (!st
->d_layout_done
) {
1244 error
= door_layout(curthread
, datasize
, ndesc
,
1245 (is_private
&& empty_pool
));
1251 * fill out the stack, starting from the top. Layout was already
1252 * filled in by door_args() or door_translate_out().
1254 if (layout
->dl_descp
!= NULL
) {
1256 start
= kmem_alloc(descsize
, KM_SLEEP
);
1259 if (door_insert(*fpp
, &start
[ncopied
]) == -1) {
1267 if (door_stack_copyout(start
, layout
->dl_descp
, descsize
)) {
1272 fpp
= NULL
; /* finished processing */
1274 if (layout
->dl_datap
!= NULL
) {
1275 ASSERT(datasize
!= 0);
1276 datap
= layout
->dl_datap
;
1277 if (ct
->d_upcall
!= NULL
|| datasize
<= door_max_arg
) {
1278 if (door_stack_copyout(ct
->d_buf
, datap
, datasize
)) {
1285 if (is_private
&& empty_pool
) {
1288 infop
= layout
->dl_infop
;
1289 ASSERT(infop
!= NULL
);
1291 di
.di_target
= curproc
->p_pid
;
1292 di
.di_proc
= (door_ptr_t
)(uintptr_t)dp
->door_pc
;
1293 di
.di_data
= (door_ptr_t
)(uintptr_t)dp
->door_data
;
1294 di
.di_uniquifier
= dp
->door_index
;
1295 di
.di_attributes
= (dp
->door_flags
& DOOR_ATTR_MASK
) |
1298 if (door_stack_copyout(&di
, infop
, sizeof (di
))) {
1304 if (get_udatamodel() == DATAMODEL_NATIVE
) {
1305 struct door_results dr
;
1307 dr
.cookie
= dp
->door_data
;
1308 dr
.data_ptr
= datap
;
1309 dr
.data_size
= datasize
;
1310 dr
.desc_ptr
= (door_desc_t
*)layout
->dl_descp
;
1311 dr
.desc_num
= ncopied
;
1312 dr
.pc
= dp
->door_pc
;
1313 dr
.nservers
= !empty_pool
;
1314 dr
.door_info
= (door_info_t
*)infop
;
1316 if (door_stack_copyout(&dr
, layout
->dl_resultsp
, sizeof (dr
))) {
1320 #ifdef _SYSCALL32_IMPL
1322 struct door_results32 dr32
;
1324 dr32
.cookie
= (caddr32_t
)(uintptr_t)dp
->door_data
;
1325 dr32
.data_ptr
= (caddr32_t
)(uintptr_t)datap
;
1326 dr32
.data_size
= (size32_t
)datasize
;
1327 dr32
.desc_ptr
= (caddr32_t
)(uintptr_t)layout
->dl_descp
;
1328 dr32
.desc_num
= ncopied
;
1329 dr32
.pc
= (caddr32_t
)(uintptr_t)dp
->door_pc
;
1330 dr32
.nservers
= !empty_pool
;
1331 dr32
.door_info
= (caddr32_t
)(uintptr_t)infop
;
1333 if (door_stack_copyout(&dr32
, layout
->dl_resultsp
,
1341 error
= door_finish_dispatch(layout
->dl_sp
);
1343 if (start
!= NULL
) {
1345 door_fd_close(start
, ncopied
);
1346 kmem_free(start
, descsize
);
1349 door_fp_close(fpp
, ndesc
);
1355 * Return the results (if any) to the caller (if any) and wait for the
1356 * next invocation on a door.
1359 door_return(caddr_t data_ptr
, size_t data_size
,
1360 door_desc_t
*desc_ptr
, uint_t desc_num
, caddr_t sp
, size_t ssize
)
1366 door_server_t
*st
; /* curthread door_data */
1367 door_client_t
*ct
; /* caller door_data */
1370 st
= door_my_server(1);
1373 * If thread was bound to a door that no longer exists, return
1374 * an error. This can happen if a thread is bound to a door
1375 * before the process calls forkall(); in the child, the door
1376 * doesn't exist and door_fork() sets the d_invbound flag.
1379 return (set_errno(EINVAL
));
1381 st
->d_sp
= sp
; /* Save base of stack. */
1382 st
->d_ssize
= ssize
; /* and its size */
1385 * This should be done in shuttle_resume(), just before going to
1386 * sleep, but we want to avoid overhead while holding door_knob.
1387 * prstop() is just a no-op if we don't really go to sleep.
1388 * We test not-kernel-address-space for the sake of clustering code.
1390 lwp
= ttolwp(curthread
);
1391 if (lwp
&& lwp
->lwp_nostop
== 0 && curproc
->p_as
!= &kas
)
1392 prstop(PR_REQUESTED
, 0);
1394 /* Make sure the caller hasn't gone away */
1395 mutex_enter(&door_knob
);
1396 if ((caller
= st
->d_caller
) == NULL
|| caller
->t_door
== NULL
) {
1397 if (desc_num
!= 0) {
1398 /* close any DOOR_RELEASE descriptors */
1399 mutex_exit(&door_knob
);
1400 error
= door_release_fds(desc_ptr
, desc_num
);
1402 return (set_errno(error
));
1403 mutex_enter(&door_knob
);
1407 ct
= DOOR_CLIENT(caller
->t_door
);
1409 ct
->d_args
.data_size
= data_size
;
1410 ct
->d_args
.desc_num
= desc_num
;
1412 * Transfer results, if any, to the client
1414 if (data_size
!= 0 || desc_num
!= 0) {
1416 * Prevent the client from exiting until we have finished
1420 mutex_exit(&door_knob
);
1421 error
= door_results(caller
, data_ptr
, data_size
,
1422 desc_ptr
, desc_num
);
1423 mutex_enter(&door_knob
);
1426 * Pass EOVERFLOW errors back to the client
1428 if (error
&& error
!= EOVERFLOW
) {
1429 mutex_exit(&door_knob
);
1430 return (set_errno(error
));
1434 /* Put ourselves on the available server thread list */
1435 door_release_server(st
->d_pool
, curthread
);
1438 * Make sure the caller is still waiting to be resumed
1443 thread_lock(caller
);
1444 ct
->d_error
= error
; /* Return any errors */
1445 if (caller
->t_state
== TS_SLEEP
&&
1446 SOBJ_TYPE(caller
->t_sobj_ops
) == SOBJ_SHUTTLE
) {
1449 tlp
= caller
->t_lockp
;
1451 * Setting t_disp_queue prevents erroneous preemptions
1452 * if this thread is still in execution on another
1455 caller
->t_disp_queue
= cp
->cpu_disp
;
1458 * We are calling thread_onproc() instead of
1459 * THREAD_ONPROC() because compiler can reorder
1460 * the two stores of t_state and t_lockp in
1463 thread_onproc(caller
, cp
);
1464 disp_lock_exit_high(tlp
);
1465 shuttle_resume(caller
, &door_knob
);
1467 /* May have been setrun or in stop state */
1468 thread_unlock(caller
);
1469 shuttle_swtch(&door_knob
);
1472 shuttle_swtch(&door_knob
);
1476 * We've sprung to life. Determine if we are part of a door
1477 * invocation, or just interrupted
1479 mutex_enter(&door_knob
);
1480 if ((dp
= st
->d_active
) != NULL
) {
1482 * Normal door invocation. Return any error condition
1483 * encountered while trying to pass args to the server
1486 lwp
->lwp_asleep
= 0;
1488 * Prevent the caller from leaving us while we
1489 * are copying out the arguments from it's buffer.
1491 ASSERT(st
->d_caller
!= NULL
);
1492 ct
= DOOR_CLIENT(st
->d_caller
->t_door
);
1495 mutex_exit(&door_knob
);
1496 error
= door_server_dispatch(ct
, dp
);
1497 mutex_enter(&door_knob
);
1500 /* let the client know we have processed its message */
1501 ct
->d_args_done
= 1;
1504 caller
= st
->d_caller
;
1506 ct
= DOOR_CLIENT(caller
->t_door
);
1511 mutex_exit(&door_knob
);
1515 * We are not involved in a door_invocation.
1516 * Check for /proc related activity...
1518 st
->d_caller
= NULL
;
1519 door_server_exit(curproc
, curthread
);
1520 mutex_exit(&door_knob
);
1522 if (ISSIG(curthread
, FORREAL
) || lwp
->lwp_sysabort
||
1523 MUSTRETURN(curproc
, curthread
) ||
1524 (cancel_pending
= schedctl_cancel_pending()) != 0) {
1526 schedctl_cancel_eintr();
1527 lwp
->lwp_asleep
= 0;
1528 lwp
->lwp_sysabort
= 0;
1529 return (set_errno(EINTR
));
1531 /* Go back and wait for another request */
1532 lwp
->lwp_asleep
= 0;
1533 mutex_enter(&door_knob
);
1540 * Revoke any future invocations on this door
1543 door_revoke(int did
)
1548 if ((d
= door_lookup(did
, NULL
)) == NULL
)
1549 return (set_errno(EBADF
));
1551 mutex_enter(&door_knob
);
1552 if (d
->door_target
!= curproc
) {
1553 mutex_exit(&door_knob
);
1555 return (set_errno(EPERM
));
1557 d
->door_flags
|= DOOR_REVOKED
;
1558 if (d
->door_flags
& DOOR_PRIVATE
)
1559 cv_broadcast(&d
->door_servers
.dp_cv
);
1561 cv_broadcast(&curproc
->p_server_threads
.dp_cv
);
1562 mutex_exit(&door_knob
);
1564 /* Invalidate the descriptor */
1565 if ((error
= closeandsetf(did
, NULL
)) != 0)
1566 return (set_errno(error
));
1571 door_info(int did
, struct door_info
*d_info
)
1578 if (did
== DOOR_QUERY
) {
1579 /* Get information on door current thread is bound to */
1580 if ((st
= door_my_server(0)) == NULL
||
1581 (dp
= st
->d_pool
) == NULL
)
1582 /* Thread isn't bound to a door */
1583 return (set_errno(EBADF
));
1584 } else if ((dp
= door_lookup(did
, &fp
)) == NULL
) {
1586 return (set_errno(EBADF
));
1589 door_info_common(dp
, &di
, fp
);
1591 if (did
!= DOOR_QUERY
)
1594 if (copyout(&di
, d_info
, sizeof (struct door_info
)))
1595 return (set_errno(EFAULT
));
1600 * Common code for getting information about a door either via the
1601 * door_info system call or the door_ki_info kernel call.
1604 door_info_common(door_node_t
*dp
, struct door_info
*dip
, file_t
*fp
)
1608 bzero(dip
, sizeof (door_info_t
));
1610 mutex_enter(&door_knob
);
1611 if (dp
->door_target
== NULL
)
1612 dip
->di_target
= -1;
1614 dip
->di_target
= dp
->door_target
->p_pid
;
1616 dip
->di_attributes
= dp
->door_flags
& DOOR_ATTR_MASK
;
1617 if (dp
->door_target
== curproc
)
1618 dip
->di_attributes
|= DOOR_LOCAL
;
1619 dip
->di_proc
= (door_ptr_t
)(uintptr_t)dp
->door_pc
;
1620 dip
->di_data
= (door_ptr_t
)(uintptr_t)dp
->door_data
;
1621 dip
->di_uniquifier
= dp
->door_index
;
1623 * If this door is in the middle of having an unreferenced
1624 * notification delivered, don't count the VN_HOLD by
1625 * door_deliver_unref in determining if it is unreferenced.
1626 * This handles the case where door_info is called from the
1627 * thread delivering the unref notification.
1629 if (dp
->door_flags
& DOOR_UNREF_ACTIVE
)
1633 mutex_exit(&door_knob
);
1637 * If this thread is bound to the door, then we can just
1638 * check the vnode; a ref count of 1 (or 2 if this is
1639 * handling an unref notification) means that the hold
1640 * from the door_bind is the only reference to the door
1641 * (no file descriptor refers to it).
1643 if (DTOV(dp
)->v_count
== unref_count
)
1644 dip
->di_attributes
|= DOOR_IS_UNREF
;
1647 * If we're working from a file descriptor or door handle
1648 * we need to look at the file structure count. We don't
1649 * need to hold the vnode lock since this is just a snapshot.
1651 mutex_enter(&fp
->f_tlock
);
1652 if (fp
->f_count
== 1 && DTOV(dp
)->v_count
== unref_count
)
1653 dip
->di_attributes
|= DOOR_IS_UNREF
;
1654 mutex_exit(&fp
->f_tlock
);
1659 * Return credentials of the door caller (if any) for this invocation
1662 door_ucred(struct ucred_s
*uch
)
1669 struct ucred_s
*res
;
1672 mutex_enter(&door_knob
);
1673 if ((st
= door_my_server(0)) == NULL
||
1674 (caller
= st
->d_caller
) == NULL
) {
1675 mutex_exit(&door_knob
);
1676 return (set_errno(EINVAL
));
1679 ASSERT(caller
->t_door
!= NULL
);
1680 ct
= DOOR_CLIENT(caller
->t_door
);
1682 /* Prevent caller from exiting while we examine the cred */
1684 mutex_exit(&door_knob
);
1686 p
= ttoproc(caller
);
1689 * If the credentials are not specified by the client, get the one
1690 * associated with the calling process.
1692 if ((dup
= ct
->d_upcall
) != NULL
)
1693 res
= cred2ucred(dup
->du_cred
, p0
.p_pid
, NULL
, CRED());
1695 res
= cred2ucred(caller
->t_cred
, p
->p_pid
, NULL
, CRED());
1697 mutex_enter(&door_knob
);
1699 mutex_exit(&door_knob
);
1701 err
= copyout(res
, uch
, res
->uc_size
);
1703 kmem_free(res
, res
->uc_size
);
1706 return (set_errno(EFAULT
));
1712 * Bind the current lwp to the server thread pool associated with 'did'
1720 if ((dp
= door_lookup(did
, NULL
)) == NULL
) {
1722 return (set_errno(EBADF
));
1726 * Can't bind to a non-private door, and can't bind to a door
1727 * served by another process.
1729 if ((dp
->door_flags
& DOOR_PRIVATE
) == 0 ||
1730 dp
->door_target
!= curproc
) {
1732 return (set_errno(EINVAL
));
1735 st
= door_my_server(1);
1737 door_unbind_thread(st
->d_pool
);
1740 door_bind_thread(dp
);
1747 * Unbind the current lwp from it's server thread pool
1754 if ((st
= door_my_server(0)) == NULL
)
1755 return (set_errno(EBADF
));
1757 if (st
->d_invbound
) {
1758 ASSERT(st
->d_pool
== NULL
);
1762 if (st
->d_pool
== NULL
)
1763 return (set_errno(EBADF
));
1764 door_unbind_thread(st
->d_pool
);
1770 * Create a descriptor for the associated file and fill in the
1771 * attributes associated with it.
1773 * Return 0 for success, -1 otherwise;
1776 door_insert(struct file
*fp
, door_desc_t
*dp
)
1780 door_attr_t attributes
= DOOR_DESCRIPTOR
;
1782 ASSERT(MUTEX_NOT_HELD(&door_knob
));
1783 if ((fd
= ufalloc(0)) == -1)
1786 dp
->d_data
.d_desc
.d_descriptor
= fd
;
1788 /* Fill in the attributes */
1789 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
1791 if (vp
&& vp
->v_type
== VDOOR
) {
1792 if (VTOD(vp
)->door_target
== curproc
)
1793 attributes
|= DOOR_LOCAL
;
1794 attributes
|= VTOD(vp
)->door_flags
& DOOR_ATTR_MASK
;
1795 dp
->d_data
.d_desc
.d_id
= VTOD(vp
)->door_index
;
1797 dp
->d_attributes
= attributes
;
1802 * Return an available thread for this server. A NULL return value indicates
1804 * The door has been revoked, or
1805 * a signal was received.
1806 * The two conditions can be differentiated using DOOR_INVALID(dp).
1809 door_get_server(door_node_t
*dp
)
1812 kthread_t
*server_t
;
1820 ASSERT(MUTEX_HELD(&door_knob
));
1822 if (dp
->door_flags
& DOOR_PRIVATE
)
1823 pool
= &dp
->door_servers
;
1825 pool
= &dp
->door_target
->p_server_threads
;
1829 * We search the thread pool, looking for a server thread
1830 * ready to take an invocation (i.e. one which is still
1831 * sleeping on a shuttle object). If none are available,
1832 * we sleep on the pool's CV, and will be signaled when a
1833 * thread is added to the pool.
1835 * This relies on the fact that once a thread in the thread
1836 * pool wakes up, it *must* remove and add itself to the pool
1837 * before it can receive door calls.
1839 if (DOOR_INVALID(dp
))
1840 return (NULL
); /* Target has become invalid */
1842 for (ktp
= &pool
->dp_threads
;
1843 (server_t
= *ktp
) != NULL
;
1844 ktp
= &st
->d_servers
) {
1845 st
= DOOR_SERVER(server_t
->t_door
);
1847 thread_lock(server_t
);
1848 if (server_t
->t_state
== TS_SLEEP
&&
1849 SOBJ_TYPE(server_t
->t_sobj_ops
) == SOBJ_SHUTTLE
)
1851 thread_unlock(server_t
);
1853 if (server_t
!= NULL
)
1854 break; /* we've got a live one! */
1856 if (!cv_wait_sig_swap_core(&pool
->dp_cv
, &door_knob
,
1859 * If we were signaled and the door is still
1860 * valid, pass the signal on to another waiter.
1862 if (signalled
&& !DOOR_INVALID(dp
))
1863 cv_signal(&pool
->dp_cv
);
1864 return (NULL
); /* Got a signal */
1869 * We've got a thread_lock()ed thread which is still on the
1870 * shuttle. Take it off the list of available server threads
1871 * and mark it as ONPROC. We are committed to resuming this
1874 tlp
= server_t
->t_lockp
;
1877 *ktp
= st
->d_servers
;
1878 st
->d_servers
= NULL
;
1880 * Setting t_disp_queue prevents erroneous preemptions
1881 * if this thread is still in execution on another processor
1883 server_t
->t_disp_queue
= cp
->cpu_disp
;
1884 CL_ACTIVE(server_t
);
1886 * We are calling thread_onproc() instead of
1887 * THREAD_ONPROC() because compiler can reorder
1888 * the two stores of t_state and t_lockp in
1891 thread_onproc(server_t
, cp
);
1892 disp_lock_exit(tlp
);
1897 * Put a server thread back in the pool.
1900 door_release_server(door_node_t
*dp
, kthread_t
*t
)
1902 door_server_t
*st
= DOOR_SERVER(t
->t_door
);
1905 ASSERT(MUTEX_HELD(&door_knob
));
1906 st
->d_active
= NULL
;
1907 st
->d_caller
= NULL
;
1908 st
->d_layout_done
= 0;
1909 if (dp
&& (dp
->door_flags
& DOOR_PRIVATE
)) {
1910 ASSERT(dp
->door_target
== NULL
||
1911 dp
->door_target
== ttoproc(t
));
1912 pool
= &dp
->door_servers
;
1914 pool
= &ttoproc(t
)->p_server_threads
;
1917 st
->d_servers
= pool
->dp_threads
;
1918 pool
->dp_threads
= t
;
1920 /* If someone is waiting for a server thread, wake him up */
1921 cv_signal(&pool
->dp_cv
);
1925 * Remove a server thread from the pool if present.
1928 door_server_exit(proc_t
*p
, kthread_t
*t
)
1932 door_server_t
*st
= DOOR_SERVER(t
->t_door
);
1934 ASSERT(MUTEX_HELD(&door_knob
));
1935 if (st
->d_pool
!= NULL
) {
1936 ASSERT(st
->d_pool
->door_flags
& DOOR_PRIVATE
);
1937 pool
= &st
->d_pool
->door_servers
;
1939 pool
= &p
->p_server_threads
;
1942 next
= &pool
->dp_threads
;
1943 while (*next
!= NULL
) {
1945 *next
= DOOR_SERVER(t
->t_door
)->d_servers
;
1948 next
= &(DOOR_SERVER((*next
)->t_door
)->d_servers
);
1953 * Lookup the door descriptor. Caller must call releasef when finished
1954 * with associated door.
1956 static door_node_t
*
1957 door_lookup(int did
, file_t
**fpp
)
1962 ASSERT(MUTEX_NOT_HELD(&door_knob
));
1963 if ((fp
= getf(did
)) == NULL
)
1966 * Use the underlying vnode (we may be namefs mounted)
1968 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
1971 if (vp
== NULL
|| vp
->v_type
!= VDOOR
) {
1983 * The current thread is exiting, so clean up any pending
1984 * invocation details
1995 * If we are an active door server, notify our
1996 * client that we are exiting and revoke our door.
1998 if ((dt
= door_my_data(0)) == NULL
)
2000 ct
= DOOR_CLIENT(dt
);
2001 st
= DOOR_SERVER(dt
);
2003 mutex_enter(&door_knob
);
2005 if (DOOR_T_HELD(ct
))
2006 cv_wait(&ct
->d_cv
, &door_knob
);
2007 else if (DOOR_T_HELD(st
))
2008 cv_wait(&st
->d_cv
, &door_knob
);
2010 break; /* neither flag is set */
2012 curthread
->t_door
= NULL
;
2013 if ((dp
= st
->d_active
) != NULL
) {
2014 kthread_t
*t
= st
->d_caller
;
2015 proc_t
*p
= curproc
;
2017 /* Revoke our door if the process is exiting */
2018 if (dp
->door_target
== p
&& (p
->p_flag
& SEXITING
)) {
2019 door_list_delete(dp
);
2020 dp
->door_target
= NULL
;
2021 dp
->door_flags
|= DOOR_REVOKED
;
2022 if (dp
->door_flags
& DOOR_PRIVATE
)
2023 cv_broadcast(&dp
->door_servers
.dp_cv
);
2025 cv_broadcast(&p
->p_server_threads
.dp_cv
);
2030 * Let the caller know we are gone
2032 DOOR_CLIENT(t
->t_door
)->d_error
= DOOR_EXIT
;
2034 if (t
->t_state
== TS_SLEEP
&&
2035 SOBJ_TYPE(t
->t_sobj_ops
) == SOBJ_SHUTTLE
)
2040 mutex_exit(&door_knob
);
2042 door_unbind_thread(st
->d_pool
); /* Implicit door_unbind */
2043 kmem_free(dt
, sizeof (door_data_t
));
2047 * Set DOOR_REVOKED for all doors of the current process. This is called
2048 * on exit before all lwp's are being terminated so that door calls will
2049 * return with an error.
2055 proc_t
*p
= ttoproc(curthread
);
2057 mutex_enter(&door_knob
);
2058 for (dp
= p
->p_door_list
; dp
!= NULL
; dp
= dp
->door_list
) {
2059 ASSERT(dp
->door_target
== p
);
2060 dp
->door_flags
|= DOOR_REVOKED
;
2061 if (dp
->door_flags
& DOOR_PRIVATE
)
2062 cv_broadcast(&dp
->door_servers
.dp_cv
);
2064 cv_broadcast(&p
->p_server_threads
.dp_cv
);
2065 mutex_exit(&door_knob
);
2069 * The process is exiting, and all doors it created need to be revoked.
2075 proc_t
*p
= ttoproc(curthread
);
2077 ASSERT(p
->p_lwpcnt
== 1);
2079 * Walk the list of active doors created by this process and
2082 mutex_enter(&door_knob
);
2083 for (dp
= p
->p_door_list
; dp
!= NULL
; dp
= dp
->door_list
) {
2084 dp
->door_target
= NULL
;
2085 dp
->door_flags
|= DOOR_REVOKED
;
2086 if (dp
->door_flags
& DOOR_PRIVATE
)
2087 cv_broadcast(&dp
->door_servers
.dp_cv
);
2089 cv_broadcast(&p
->p_server_threads
.dp_cv
);
2090 /* Clear the list */
2091 p
->p_door_list
= NULL
;
2093 /* Clean up the unref list */
2094 while ((dp
= p
->p_unref_list
) != NULL
) {
2095 p
->p_unref_list
= dp
->door_ulist
;
2096 dp
->door_ulist
= NULL
;
2097 mutex_exit(&door_knob
);
2099 mutex_enter(&door_knob
);
2101 mutex_exit(&door_knob
);
2106 * The process is executing forkall(), and we need to flag threads that
2107 * are bound to a door in the child. This will make the child threads
2108 * return an error to door_return unless they call door_unbind first.
2111 door_fork(kthread_t
*parent
, kthread_t
*child
)
2113 door_data_t
*pt
= parent
->t_door
;
2114 door_server_t
*st
= DOOR_SERVER(pt
);
2117 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2118 if (pt
!= NULL
&& (st
->d_pool
!= NULL
|| st
->d_invbound
)) {
2119 /* parent thread is bound to a door */
2120 dt
= child
->t_door
=
2121 kmem_zalloc(sizeof (door_data_t
), KM_SLEEP
);
2122 DOOR_SERVER(dt
)->d_invbound
= 1;
2127 * Deliver queued unrefs to appropriate door server.
2133 static door_arg_t unref_args
= { DOOR_UNREF_DATA
, 0, 0, 0, 0, 0 };
2134 proc_t
*p
= ttoproc(curthread
);
2136 /* make sure there's only one unref thread per process */
2137 mutex_enter(&door_knob
);
2138 if (p
->p_unref_thread
) {
2139 mutex_exit(&door_knob
);
2140 return (set_errno(EALREADY
));
2142 p
->p_unref_thread
= 1;
2143 mutex_exit(&door_knob
);
2145 (void) door_my_data(1); /* create info, if necessary */
2148 mutex_enter(&door_knob
);
2150 /* Grab a queued request */
2151 while ((dp
= p
->p_unref_list
) == NULL
) {
2152 if (!cv_wait_sig(&p
->p_unref_cv
, &door_knob
)) {
2155 * Return so we can finish forkall() or exit().
2157 p
->p_unref_thread
= 0;
2158 mutex_exit(&door_knob
);
2159 return (set_errno(EINTR
));
2162 p
->p_unref_list
= dp
->door_ulist
;
2163 dp
->door_ulist
= NULL
;
2164 dp
->door_flags
|= DOOR_UNREF_ACTIVE
;
2165 mutex_exit(&door_knob
);
2167 (void) door_upcall(DTOV(dp
), &unref_args
, NULL
, SIZE_MAX
, 0);
2169 if (unref_args
.rbuf
!= 0) {
2170 kmem_free(unref_args
.rbuf
, unref_args
.rsize
);
2171 unref_args
.rbuf
= NULL
;
2172 unref_args
.rsize
= 0;
2175 mutex_enter(&door_knob
);
2176 ASSERT(dp
->door_flags
& DOOR_UNREF_ACTIVE
);
2177 dp
->door_flags
&= ~DOOR_UNREF_ACTIVE
;
2178 mutex_exit(&door_knob
);
2185 * Deliver queued unrefs to kernel door server.
2189 door_unref_kernel(caddr_t arg
)
2192 static door_arg_t unref_args
= { DOOR_UNREF_DATA
, 0, 0, 0, 0, 0 };
2193 proc_t
*p
= ttoproc(curthread
);
2194 callb_cpr_t cprinfo
;
2196 /* should only be one of these */
2197 mutex_enter(&door_knob
);
2198 if (p
->p_unref_thread
) {
2199 mutex_exit(&door_knob
);
2202 p
->p_unref_thread
= 1;
2203 mutex_exit(&door_knob
);
2205 (void) door_my_data(1); /* make sure we have a door_data_t */
2207 CALLB_CPR_INIT(&cprinfo
, &door_knob
, callb_generic_cpr
, "door_unref");
2209 mutex_enter(&door_knob
);
2210 /* Grab a queued request */
2211 while ((dp
= p
->p_unref_list
) == NULL
) {
2212 CALLB_CPR_SAFE_BEGIN(&cprinfo
);
2213 cv_wait(&p
->p_unref_cv
, &door_knob
);
2214 CALLB_CPR_SAFE_END(&cprinfo
, &door_knob
);
2216 p
->p_unref_list
= dp
->door_ulist
;
2217 dp
->door_ulist
= NULL
;
2218 dp
->door_flags
|= DOOR_UNREF_ACTIVE
;
2219 mutex_exit(&door_knob
);
2221 (*(dp
->door_pc
))(dp
->door_data
, &unref_args
, NULL
, NULL
, NULL
);
2223 mutex_enter(&door_knob
);
2224 ASSERT(dp
->door_flags
& DOOR_UNREF_ACTIVE
);
2225 dp
->door_flags
&= ~DOOR_UNREF_ACTIVE
;
2226 mutex_exit(&door_knob
);
2233 * Queue an unref invocation for processing for the current process
2234 * The door may or may not be revoked at this point.
2237 door_deliver_unref(door_node_t
*d
)
2239 struct proc
*server
= d
->door_target
;
2241 ASSERT(MUTEX_HELD(&door_knob
));
2242 ASSERT(d
->door_active
== 0);
2247 * Create a lwp to deliver unref calls if one isn't already running.
2249 * A separate thread is used to deliver unrefs since the current
2250 * thread may be holding resources (e.g. locks) in user land that
2251 * may be needed by the unref processing. This would cause a
2254 if (d
->door_flags
& DOOR_UNREF_MULTI
) {
2255 /* multiple unrefs */
2256 d
->door_flags
&= ~DOOR_DELAY
;
2258 /* Only 1 unref per door */
2259 d
->door_flags
&= ~(DOOR_UNREF
|DOOR_DELAY
);
2261 mutex_exit(&door_knob
);
2264 * Need to bump the vnode count before putting the door on the
2265 * list so it doesn't get prematurely released by door_unref.
2269 mutex_enter(&door_knob
);
2270 /* is this door already on the unref list? */
2271 if (d
->door_flags
& DOOR_UNREF_MULTI
) {
2273 for (dp
= server
->p_unref_list
; dp
!= NULL
;
2274 dp
= dp
->door_ulist
) {
2276 /* already there, don't need to add another */
2277 mutex_exit(&door_knob
);
2279 mutex_enter(&door_knob
);
2284 ASSERT(d
->door_ulist
== NULL
);
2285 d
->door_ulist
= server
->p_unref_list
;
2286 server
->p_unref_list
= d
;
2287 cv_broadcast(&server
->p_unref_cv
);
2291 * The callers buffer isn't big enough for all of the data/fd's. Allocate
2292 * space in the callers address space for the results and copy the data
2295 * For EOVERFLOW, we must clean up the server's door descriptors.
2300 caddr_t data_ptr
, /* data location */
2301 size_t data_size
, /* data size */
2302 door_desc_t
*desc_ptr
, /* descriptor location */
2303 uint_t desc_num
) /* descriptor size */
2305 proc_t
*callerp
= ttoproc(caller
);
2306 struct as
*as
= callerp
->p_as
;
2307 door_client_t
*ct
= DOOR_CLIENT(caller
->t_door
);
2308 caddr_t addr
; /* Resulting address in target */
2309 size_t rlen
; /* Rounded len */
2312 size_t ds
= desc_num
* sizeof (door_desc_t
);
2314 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2315 ASSERT(DOOR_T_HELD(ct
) || ct
->d_kernel
);
2317 /* Do initial overflow check */
2318 if (!ufcanalloc(callerp
, desc_num
))
2322 * Allocate space for this stuff in the callers address space
2324 rlen
= roundup(data_size
+ ds
, PAGESIZE
);
2326 map_addr_proc(&addr
, rlen
, 0, 1, as
->a_userlimit
, ttoproc(caller
), 0);
2328 as_map(as
, addr
, rlen
, segvn_create
, zfod_argsp
) != 0) {
2329 /* No virtual memory available, or anon mapping failed */
2331 if (!ct
->d_kernel
&& desc_num
> 0) {
2332 int error
= door_release_fds(desc_ptr
, desc_num
);
2343 if (data_size
!= 0) {
2344 caddr_t src
= data_ptr
;
2345 caddr_t saddr
= addr
;
2353 amount
= len
> PAGESIZE
? PAGESIZE
: len
;
2354 if ((error
= door_copy(as
, src
, saddr
, amount
)) != 0) {
2355 (void) as_unmap(as
, addr
, rlen
);
2364 if (desc_num
!= 0) {
2365 door_desc_t
*didpp
, *start
;
2369 start
= didpp
= kmem_alloc(ds
, KM_SLEEP
);
2370 if (copyin_nowatch(desc_ptr
, didpp
, ds
)) {
2371 kmem_free(start
, ds
);
2372 (void) as_unmap(as
, addr
, rlen
);
2376 fpp_size
= desc_num
* sizeof (struct file
*);
2377 if (fpp_size
> ct
->d_fpp_size
) {
2378 /* make more space */
2380 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
2381 ct
->d_fpp_size
= fpp_size
;
2382 ct
->d_fpp
= kmem_alloc(ct
->d_fpp_size
, KM_SLEEP
);
2386 for (i
= 0; i
< desc_num
; i
++) {
2388 int fd
= didpp
->d_data
.d_desc
.d_descriptor
;
2390 if (!(didpp
->d_attributes
& DOOR_DESCRIPTOR
) ||
2391 (fp
= getf(fd
)) == NULL
) {
2392 /* close translated references */
2393 door_fp_close(ct
->d_fpp
, fpp
- ct
->d_fpp
);
2394 /* close untranslated references */
2395 door_fd_rele(didpp
, desc_num
- i
, 0);
2396 kmem_free(start
, ds
);
2397 (void) as_unmap(as
, addr
, rlen
);
2400 mutex_enter(&fp
->f_tlock
);
2402 mutex_exit(&fp
->f_tlock
);
2407 if (didpp
->d_attributes
& DOOR_RELEASE
) {
2408 /* release passed reference */
2409 (void) closeandsetf(fd
, NULL
);
2414 kmem_free(start
, ds
);
2419 ct
->d_args
.rbuf
= addr
;
2420 ct
->d_args
.rsize
= rlen
;
2425 * Transfer arguments from the client to the server.
2428 door_args(kthread_t
*server
, int is_private
)
2430 door_server_t
*st
= DOOR_SERVER(server
->t_door
);
2431 door_client_t
*ct
= DOOR_CLIENT(curthread
->t_door
);
2436 ASSERT(DOOR_T_HELD(st
));
2437 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2439 ndid
= ct
->d_args
.desc_num
;
2440 if (ndid
> door_max_desc
)
2444 * Get the stack layout, and fail now if it won't fit.
2446 error
= door_layout(server
, ct
->d_args
.data_size
, ndid
, is_private
);
2450 dsize
= ndid
* sizeof (door_desc_t
);
2451 if (ct
->d_args
.data_size
!= 0) {
2452 if (ct
->d_args
.data_size
<= door_max_arg
) {
2454 * Use a 2 copy method for small amounts of data
2456 * Allocate a little more than we need for the
2457 * args, in the hope that the results will fit
2458 * without having to reallocate a buffer
2460 ASSERT(ct
->d_buf
== NULL
);
2461 ct
->d_bufsize
= roundup(ct
->d_args
.data_size
,
2463 ct
->d_buf
= kmem_alloc(ct
->d_bufsize
, KM_SLEEP
);
2464 if (copyin_nowatch(ct
->d_args
.data_ptr
,
2465 ct
->d_buf
, ct
->d_args
.data_size
) != 0) {
2466 kmem_free(ct
->d_buf
, ct
->d_bufsize
);
2475 size_t len
= ct
->d_args
.data_size
;
2479 * Use a 1 copy method
2481 as
= ttoproc(server
)->p_as
;
2482 src
= ct
->d_args
.data_ptr
;
2484 dest
= st
->d_layout
.dl_datap
;
2485 base
= (uintptr_t)dest
;
2488 * Copy data directly into server. We proceed
2489 * downward from the top of the stack, to mimic
2490 * normal stack usage. This allows the guard page
2491 * to stop us before we corrupt anything.
2500 * Locate the next part to copy.
2503 start
= P2ALIGN(end
- 1, PAGESIZE
);
2506 * if we are on the final (first) page, fix
2507 * up the start position.
2509 if (P2ALIGN(base
, PAGESIZE
) == start
)
2512 offset
= start
- base
; /* the copy offset */
2513 amount
= end
- start
; /* # bytes to copy */
2515 ASSERT(amount
> 0 && amount
<= len
&&
2516 amount
<= PAGESIZE
);
2518 error
= door_copy(as
, src
+ offset
,
2519 dest
+ offset
, amount
);
2527 * Copyin the door args and translate them into files
2534 start
= didpp
= kmem_alloc(dsize
, KM_SLEEP
);
2536 if (copyin_nowatch(ct
->d_args
.desc_ptr
, didpp
, dsize
)) {
2537 kmem_free(start
, dsize
);
2540 ct
->d_fpp_size
= ndid
* sizeof (struct file
*);
2541 ct
->d_fpp
= kmem_alloc(ct
->d_fpp_size
, KM_SLEEP
);
2545 int fd
= didpp
->d_data
.d_desc
.d_descriptor
;
2547 /* We only understand file descriptors as passed objs */
2548 if (!(didpp
->d_attributes
& DOOR_DESCRIPTOR
) ||
2549 (fp
= getf(fd
)) == NULL
) {
2550 /* close translated references */
2551 door_fp_close(ct
->d_fpp
, fpp
- ct
->d_fpp
);
2552 /* close untranslated references */
2553 door_fd_rele(didpp
, ndid
+ 1, 0);
2554 kmem_free(start
, dsize
);
2555 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
2561 mutex_enter(&fp
->f_tlock
);
2563 mutex_exit(&fp
->f_tlock
);
2568 if (didpp
->d_attributes
& DOOR_RELEASE
) {
2569 /* release passed reference */
2570 (void) closeandsetf(fd
, NULL
);
2575 kmem_free(start
, dsize
);
2581 * Transfer arguments from a user client to a kernel server. This copies in
2582 * descriptors and translates them into door handles. It doesn't touch the
2583 * other data, letting the kernel server deal with that (to avoid needing
2584 * to copy the data twice).
2587 door_translate_in(void)
2589 door_client_t
*ct
= DOOR_CLIENT(curthread
->t_door
);
2592 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2593 ndid
= ct
->d_args
.desc_num
;
2594 if (ndid
> door_max_desc
)
2597 * Copyin the door args and translate them into door handles.
2602 size_t dsize
= ndid
* sizeof (door_desc_t
);
2605 start
= didpp
= kmem_alloc(dsize
, KM_SLEEP
);
2607 if (copyin_nowatch(ct
->d_args
.desc_ptr
, didpp
, dsize
)) {
2608 kmem_free(start
, dsize
);
2613 int fd
= didpp
->d_data
.d_desc
.d_descriptor
;
2616 * We only understand file descriptors as passed objs
2618 if ((didpp
->d_attributes
& DOOR_DESCRIPTOR
) &&
2619 (fp
= getf(fd
)) != NULL
) {
2620 didpp
->d_data
.d_handle
= FTODH(fp
);
2622 door_ki_hold(didpp
->d_data
.d_handle
);
2626 if (didpp
->d_attributes
& DOOR_RELEASE
) {
2627 /* release passed reference */
2628 (void) closeandsetf(fd
, NULL
);
2631 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
2634 /* Set attributes */
2635 didpp
->d_attributes
= DOOR_HANDLE
|
2636 (VTOD(vp
)->door_flags
& DOOR_ATTR_MASK
);
2638 /* close translated references */
2639 door_fd_close(start
, didpp
- start
);
2640 /* close untranslated references */
2641 door_fd_rele(didpp
, ndid
+ 1, 0);
2642 kmem_free(start
, dsize
);
2647 ct
->d_args
.desc_ptr
= start
;
2653 * Translate door arguments from kernel to user. This copies the passed
2654 * door handles. It doesn't touch other data. It is used by door_upcall,
2655 * and for data returned by a door_call to a kernel server.
2658 door_translate_out(void)
2660 door_client_t
*ct
= DOOR_CLIENT(curthread
->t_door
);
2663 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2664 ndid
= ct
->d_args
.desc_num
;
2665 if (ndid
> door_max_desc
) {
2666 door_fd_rele(ct
->d_args
.desc_ptr
, ndid
, 1);
2670 * Translate the door args into files
2673 door_desc_t
*didpp
= ct
->d_args
.desc_ptr
;
2676 ct
->d_fpp_size
= ndid
* sizeof (struct file
*);
2677 fpp
= ct
->d_fpp
= kmem_alloc(ct
->d_fpp_size
, KM_SLEEP
);
2679 struct file
*fp
= NULL
;
2683 * We understand file descriptors and door
2684 * handles as passed objs.
2686 if (didpp
->d_attributes
& DOOR_DESCRIPTOR
) {
2687 fd
= didpp
->d_data
.d_desc
.d_descriptor
;
2689 } else if (didpp
->d_attributes
& DOOR_HANDLE
)
2690 fp
= DHTOF(didpp
->d_data
.d_handle
);
2693 mutex_enter(&fp
->f_tlock
);
2695 mutex_exit(&fp
->f_tlock
);
2698 if (didpp
->d_attributes
& DOOR_DESCRIPTOR
)
2700 if (didpp
->d_attributes
& DOOR_RELEASE
) {
2701 /* release passed reference */
2703 (void) closeandsetf(fd
, NULL
);
2708 /* close translated references */
2709 door_fp_close(ct
->d_fpp
, fpp
- ct
->d_fpp
);
2710 /* close untranslated references */
2711 door_fd_rele(didpp
, ndid
+ 1, 1);
2712 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
2724 * Move the results from the server to the client
2727 door_results(kthread_t
*caller
, caddr_t data_ptr
, size_t data_size
,
2728 door_desc_t
*desc_ptr
, uint_t desc_num
)
2730 door_client_t
*ct
= DOOR_CLIENT(caller
->t_door
);
2731 door_upcall_t
*dup
= ct
->d_upcall
;
2736 ASSERT(DOOR_T_HELD(ct
));
2737 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2739 if (ct
->d_noresults
)
2740 return (E2BIG
); /* No results expected */
2742 if (desc_num
> door_max_desc
)
2743 return (E2BIG
); /* Too many descriptors */
2745 dsize
= desc_num
* sizeof (door_desc_t
);
2747 * Check if the results are bigger than the clients buffer
2750 rlen
= roundup(data_size
, sizeof (door_desc_t
));
2753 if ((result_size
= rlen
+ dsize
) == 0)
2757 if (desc_num
> dup
->du_max_descs
)
2760 if (data_size
> dup
->du_max_data
)
2766 if (ct
->d_args
.rbuf
== NULL
|| ct
->d_args
.rsize
< result_size
) {
2768 * If there's no return buffer or the buffer is too
2769 * small, allocate a new one. The old buffer (if it
2770 * exists) will be freed by the upcall client.
2772 if (result_size
> door_max_upcall_reply
)
2774 ct
->d_args
.rsize
= result_size
;
2775 ct
->d_args
.rbuf
= kmem_alloc(result_size
, KM_SLEEP
);
2777 ct
->d_args
.data_ptr
= ct
->d_args
.rbuf
;
2778 if (data_size
!= 0 &&
2779 copyin_nowatch(data_ptr
, ct
->d_args
.data_ptr
,
2782 } else if (result_size
> ct
->d_args
.rsize
) {
2783 return (door_overflow(caller
, data_ptr
, data_size
,
2784 desc_ptr
, desc_num
));
2785 } else if (data_size
!= 0) {
2786 if (data_size
<= door_max_arg
) {
2788 * Use a 2 copy method for small amounts of data
2790 if (ct
->d_buf
== NULL
) {
2791 ct
->d_bufsize
= data_size
;
2792 ct
->d_buf
= kmem_alloc(ct
->d_bufsize
, KM_SLEEP
);
2793 } else if (ct
->d_bufsize
< data_size
) {
2794 kmem_free(ct
->d_buf
, ct
->d_bufsize
);
2795 ct
->d_bufsize
= data_size
;
2796 ct
->d_buf
= kmem_alloc(ct
->d_bufsize
, KM_SLEEP
);
2798 if (copyin_nowatch(data_ptr
, ct
->d_buf
, data_size
) != 0)
2801 struct as
*as
= ttoproc(caller
)->p_as
;
2802 caddr_t dest
= ct
->d_args
.rbuf
;
2803 caddr_t src
= data_ptr
;
2804 size_t len
= data_size
;
2806 /* Copy data directly into client */
2813 off
= (uintptr_t)dest
& PAGEOFFSET
;
2815 max
= PAGESIZE
- off
;
2818 amount
= len
> max
? max
: len
;
2819 error
= door_copy(as
, src
, dest
, amount
);
2830 * Copyin the returned door ids and translate them into door_node_t
2832 if (desc_num
!= 0) {
2839 /* First, check if we would overflow client */
2840 if (!ufcanalloc(ttoproc(caller
), desc_num
))
2843 start
= didpp
= kmem_alloc(dsize
, KM_SLEEP
);
2844 if (copyin_nowatch(desc_ptr
, didpp
, dsize
)) {
2845 kmem_free(start
, dsize
);
2848 fpp_size
= desc_num
* sizeof (struct file
*);
2849 if (fpp_size
> ct
->d_fpp_size
) {
2850 /* make more space */
2852 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
2853 ct
->d_fpp_size
= fpp_size
;
2854 ct
->d_fpp
= kmem_alloc(fpp_size
, KM_SLEEP
);
2858 for (i
= 0; i
< desc_num
; i
++) {
2860 int fd
= didpp
->d_data
.d_desc
.d_descriptor
;
2862 /* Only understand file descriptor results */
2863 if (!(didpp
->d_attributes
& DOOR_DESCRIPTOR
) ||
2864 (fp
= getf(fd
)) == NULL
) {
2865 /* close translated references */
2866 door_fp_close(ct
->d_fpp
, fpp
- ct
->d_fpp
);
2867 /* close untranslated references */
2868 door_fd_rele(didpp
, desc_num
- i
, 0);
2869 kmem_free(start
, dsize
);
2873 mutex_enter(&fp
->f_tlock
);
2875 mutex_exit(&fp
->f_tlock
);
2880 if (didpp
->d_attributes
& DOOR_RELEASE
) {
2881 /* release passed reference */
2882 (void) closeandsetf(fd
, NULL
);
2887 kmem_free(start
, dsize
);
2893 * Close all the descriptors.
2896 door_fd_close(door_desc_t
*d
, uint_t n
)
2900 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2901 for (i
= 0; i
< n
; i
++) {
2902 if (d
->d_attributes
& DOOR_DESCRIPTOR
) {
2903 (void) closeandsetf(
2904 d
->d_data
.d_desc
.d_descriptor
, NULL
);
2905 } else if (d
->d_attributes
& DOOR_HANDLE
) {
2906 door_ki_rele(d
->d_data
.d_handle
);
2913 * Close descriptors that have the DOOR_RELEASE attribute set.
2916 door_fd_rele(door_desc_t
*d
, uint_t n
, int from_kernel
)
2920 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2921 for (i
= 0; i
< n
; i
++) {
2922 if (d
->d_attributes
& DOOR_RELEASE
) {
2923 if (d
->d_attributes
& DOOR_DESCRIPTOR
) {
2924 (void) closeandsetf(
2925 d
->d_data
.d_desc
.d_descriptor
, NULL
);
2926 } else if (from_kernel
&&
2927 (d
->d_attributes
& DOOR_HANDLE
)) {
2928 door_ki_rele(d
->d_data
.d_handle
);
2936 * Copy descriptors into the kernel so we can release any marked
2940 door_release_fds(door_desc_t
*desc_ptr
, uint_t ndesc
)
2946 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2949 desc_num
= MIN(ndesc
, door_max_desc
);
2951 dsize
= desc_num
* sizeof (door_desc_t
);
2952 didpp
= kmem_alloc(dsize
, KM_SLEEP
);
2955 uint_t count
= MIN(ndesc
, desc_num
);
2957 if (copyin_nowatch(desc_ptr
, didpp
,
2958 count
* sizeof (door_desc_t
))) {
2959 kmem_free(didpp
, dsize
);
2962 door_fd_rele(didpp
, count
, 0);
2967 kmem_free(didpp
, dsize
);
2972 * Decrement ref count on all the files passed
2975 door_fp_close(struct file
**fp
, uint_t n
)
2979 ASSERT(MUTEX_NOT_HELD(&door_knob
));
2981 for (i
= 0; i
< n
; i
++)
2982 (void) closef(fp
[i
]);
2986 * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
2989 * Performs this using 1 mapin and 1 copy operation.
2991 * We really should do more than 1 page at a time to improve
2992 * performance, but for now this is treated as an anomalous condition.
2995 door_copy(struct as
*as
, caddr_t src
, caddr_t dest
, uint_t len
)
3004 ASSERT(len
<= PAGESIZE
);
3005 off
= (uintptr_t)dest
& PAGEOFFSET
; /* offset within the page */
3006 rdest
= (caddr_t
)((uintptr_t)dest
&
3007 (uintptr_t)PAGEMASK
); /* Page boundary */
3008 ASSERT(off
+ len
<= PAGESIZE
);
3011 * Lock down destination page.
3013 if (as_pagelock(as
, &pplist
, rdest
, PAGESIZE
, S_WRITE
))
3016 * Check if we have a shadow page list from as_pagelock. If not,
3017 * we took the slow path and have to find our page struct the hard
3020 if (pplist
== NULL
) {
3023 /* MMU mapping is already locked down */
3024 AS_LOCK_ENTER(as
, RW_READER
);
3025 pfnum
= hat_getpfnum(as
->a_hat
, rdest
);
3029 * TODO: The pfn step should not be necessary - need
3030 * a hat_getpp() function.
3032 if (pf_is_memory(pfnum
)) {
3033 pp
= page_numtopp_nolock(pfnum
);
3034 ASSERT(pp
== NULL
|| PAGE_LOCKED(pp
));
3038 as_pageunlock(as
, pplist
, rdest
, PAGESIZE
, S_WRITE
);
3045 * Map destination page into kernel address
3048 kaddr
= (caddr_t
)hat_kpm_mapin(pp
, NULL
);
3050 kaddr
= (caddr_t
)ppmapin(pp
, PROT_READ
| PROT_WRITE
,
3054 * Copy from src to dest
3056 if (copyin_nowatch(src
, kaddr
+ off
, len
) != 0)
3059 * Unmap destination page from kernel
3062 hat_kpm_mapout(pp
, NULL
, kaddr
);
3066 * Unlock destination page
3068 as_pageunlock(as
, pplist
, rdest
, PAGESIZE
, S_WRITE
);
3073 * General kernel upcall using doors
3074 * Returns 0 on success, errno for failures.
3075 * Caller must have a hold on the door based vnode, and on any
3076 * references passed in desc_ptr. The references are released
3077 * in the event of an error, and passed without duplication
3078 * otherwise. Note that param->rbuf must be 64-bit aligned in
3079 * a 64-bit kernel, since it may be used to store door descriptors
3080 * if they are returned by the server. The caller is responsible
3081 * for holding a reference to the cred passed in.
3084 door_upcall(vnode_t
*vp
, door_arg_t
*param
, struct cred
*cred
,
3085 size_t max_data
, uint_t max_descs
)
3090 kthread_t
*server_thread
;
3093 door_client_t
*ct
; /* curthread door_data */
3094 door_server_t
*st
; /* server thread door_data */
3098 if (vp
->v_type
!= VDOOR
) {
3099 if (param
->desc_num
)
3100 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3104 lwp
= ttolwp(curthread
);
3105 ct
= door_my_client(1);
3106 dp
= VTOD(vp
); /* Convert to a door_node_t */
3108 dup
= kmem_zalloc(sizeof (*dup
), KM_SLEEP
);
3109 dup
->du_cred
= (cred
!= NULL
) ? cred
: curthread
->t_cred
;
3110 dup
->du_max_data
= max_data
;
3111 dup
->du_max_descs
= max_descs
;
3114 * This should be done in shuttle_resume(), just before going to
3115 * sleep, but we want to avoid overhead while holding door_knob.
3116 * prstop() is just a no-op if we don't really go to sleep.
3117 * We test not-kernel-address-space for the sake of clustering code.
3119 if (lwp
&& lwp
->lwp_nostop
== 0 && curproc
->p_as
!= &kas
)
3120 prstop(PR_REQUESTED
, 0);
3122 mutex_enter(&door_knob
);
3123 if (DOOR_INVALID(dp
)) {
3124 mutex_exit(&door_knob
);
3125 if (param
->desc_num
)
3126 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3131 if (dp
->door_target
== &p0
) {
3132 /* Can't do an upcall to a kernel server */
3133 mutex_exit(&door_knob
);
3134 if (param
->desc_num
)
3135 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3140 error
= door_check_limits(dp
, param
, 1);
3142 mutex_exit(&door_knob
);
3143 if (param
->desc_num
)
3144 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3149 * Get a server thread from the target domain
3151 if ((server_thread
= door_get_server(dp
)) == NULL
) {
3152 if (DOOR_INVALID(dp
))
3156 mutex_exit(&door_knob
);
3157 if (param
->desc_num
)
3158 door_fd_rele(param
->desc_ptr
, param
->desc_num
, 1);
3162 st
= DOOR_SERVER(server_thread
->t_door
);
3163 ct
->d_buf
= param
->data_ptr
;
3164 ct
->d_bufsize
= param
->data_size
;
3165 ct
->d_args
= *param
; /* structure assignment */
3167 if (ct
->d_args
.desc_num
) {
3169 * Move data from client to server
3172 mutex_exit(&door_knob
);
3173 error
= door_translate_out();
3174 mutex_enter(&door_knob
);
3178 * We're not going to resume this thread after all
3180 door_release_server(dp
, server_thread
);
3181 shuttle_sleep(server_thread
);
3182 mutex_exit(&door_knob
);
3188 if (param
->rsize
== 0)
3189 ct
->d_noresults
= 1;
3191 ct
->d_noresults
= 0;
3195 ct
->d_error
= DOOR_WAIT
;
3196 st
->d_caller
= curthread
;
3199 shuttle_resume(server_thread
, &door_knob
);
3201 mutex_enter(&door_knob
);
3203 if ((error
= ct
->d_error
) < 0) { /* DOOR_WAIT or DOOR_EXIT */
3205 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3207 mutex_exit(&door_knob
); /* May block in ISSIG */
3209 if (lwp
&& (ISSIG(curthread
, FORREAL
) || lwp
->lwp_sysabort
||
3210 MUSTRETURN(curproc
, curthread
) ||
3211 (cancel_pending
= schedctl_cancel_pending()) != 0)) {
3212 /* Signal, forkall, ... */
3214 schedctl_cancel_eintr();
3215 lwp
->lwp_sysabort
= 0;
3216 mutex_enter(&door_knob
);
3219 * If the server has finished processing our call,
3220 * or exited (calling door_slam()), then d_error
3221 * will have changed. If the server hasn't finished
3222 * yet, d_error will still be DOOR_WAIT, and we
3223 * let it know we are not interested in any
3224 * results by sending a SIGCANCEL, unless the door
3225 * is marked with DOOR_NO_CANCEL.
3227 if (ct
->d_error
== DOOR_WAIT
&&
3228 st
->d_caller
== curthread
) {
3229 proc_t
*p
= ttoproc(server_thread
);
3231 st
->d_active
= NULL
;
3232 st
->d_caller
= NULL
;
3233 if (!(dp
->door_flags
& DOOR_NO_CANCEL
)) {
3235 mutex_exit(&door_knob
);
3237 mutex_enter(&p
->p_lock
);
3238 sigtoproc(p
, server_thread
, SIGCANCEL
);
3239 mutex_exit(&p
->p_lock
);
3241 mutex_enter(&door_knob
);
3247 * Return from stop(), server exit...
3249 * Note that the server could have done a
3250 * door_return while the client was in stop state
3251 * (ISSIG), in which case the error condition
3252 * is updated by the server.
3254 mutex_enter(&door_knob
);
3255 if (ct
->d_error
== DOOR_WAIT
) {
3256 /* Still waiting for a reply */
3257 shuttle_swtch(&door_knob
);
3258 mutex_enter(&door_knob
);
3260 lwp
->lwp_asleep
= 0;
3261 goto shuttle_return
;
3262 } else if (ct
->d_error
== DOOR_EXIT
) {
3266 /* Server did a door_return during ISSIG */
3267 error
= ct
->d_error
;
3271 * Can't exit if the server is currently copying
3274 while (DOOR_T_HELD(ct
))
3275 cv_wait(&ct
->d_cv
, &door_knob
);
3278 * Find out if results were successfully copied.
3280 if (ct
->d_error
== 0)
3284 lwp
->lwp_asleep
= 0; /* /proc */
3285 lwp
->lwp_sysabort
= 0; /* /proc */
3287 if (--dp
->door_active
== 0 && (dp
->door_flags
& DOOR_DELAY
))
3288 door_deliver_unref(dp
);
3289 mutex_exit(&door_knob
);
3292 * Translate returned doors (if any)
3295 if (ct
->d_noresults
)
3300 * If server returned results successfully, then we've
3301 * been interrupted and may need to clean up.
3304 ASSERT(error
== EINTR
);
3305 door_fp_close(ct
->d_fpp
, ct
->d_args
.desc_num
);
3310 if (ct
->d_args
.desc_num
) {
3314 uint_t n
= ct
->d_args
.desc_num
;
3316 didpp
= ct
->d_args
.desc_ptr
= (door_desc_t
*)(ct
->d_args
.rbuf
+
3317 roundup(ct
->d_args
.data_size
, sizeof (door_desc_t
)));
3324 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3327 didpp
->d_attributes
= DOOR_HANDLE
|
3328 (VTOD(vp
)->door_flags
& DOOR_ATTR_MASK
);
3329 didpp
->d_data
.d_handle
= FTODH(fp
);
3335 /* on return data is in rbuf */
3336 *param
= ct
->d_args
; /* structure assignment */
3339 kmem_free(dup
, sizeof (*dup
));
3342 kmem_free(ct
->d_fpp
, ct
->d_fpp_size
);
3347 ct
->d_upcall
= NULL
;
3348 ct
->d_noresults
= 0;
3355 * Add a door to the per-process list of active doors for which the
3356 * process is a server.
3359 door_list_insert(door_node_t
*dp
)
3361 proc_t
*p
= dp
->door_target
;
3363 ASSERT(MUTEX_HELD(&door_knob
));
3364 dp
->door_list
= p
->p_door_list
;
3365 p
->p_door_list
= dp
;
3369 * Remove a door from the per-process list of active doors.
3372 door_list_delete(door_node_t
*dp
)
3376 ASSERT(MUTEX_HELD(&door_knob
));
3378 * Find the door in the list. If the door belongs to another process,
3379 * it's OK to use p_door_list since that process can't exit until all
3380 * doors have been taken off the list (see door_exit).
3382 pp
= &(dp
->door_target
->p_door_list
);
3384 pp
= &((*pp
)->door_list
);
3386 /* found it, take it off the list */
3387 *pp
= dp
->door_list
;
3392 * External kernel interfaces for doors. These functions are available
3393 * outside the doorfs module for use in creating and using doors from
3394 * within the kernel.
3398 * door_ki_upcall invokes a user-level door server from the kernel, with
3399 * the credentials associated with curthread.
3402 door_ki_upcall(door_handle_t dh
, door_arg_t
*param
)
3404 return (door_ki_upcall_limited(dh
, param
, NULL
, SIZE_MAX
, UINT_MAX
));
3408 * door_ki_upcall_limited invokes a user-level door server from the
3409 * kernel with the given credentials and reply limits. If the "cred"
3410 * argument is NULL, uses the credentials associated with current
3411 * thread. max_data limits the maximum length of the returned data (the
3412 * client will get E2BIG if they go over), and max_desc limits the
3413 * number of returned descriptors (the client will get EMFILE if they
3417 door_ki_upcall_limited(door_handle_t dh
, door_arg_t
*param
, struct cred
*cred
,
3418 size_t max_data
, uint_t max_desc
)
3420 file_t
*fp
= DHTOF(dh
);
3423 if (fop_realvp(fp
->f_vnode
, &realvp
, NULL
))
3424 realvp
= fp
->f_vnode
;
3425 return (door_upcall(realvp
, param
, cred
, max_data
, max_desc
));
3429 * Function call to create a "kernel" door server. A kernel door
3430 * server provides a way for a user-level process to invoke a function
3431 * in the kernel through a door_call. From the caller's point of
3432 * view, a kernel door server looks the same as a user-level one
3433 * (except the server pid is 0). Unlike normal door calls, the
3434 * kernel door function is invoked via a normal function call in the
3435 * same thread and context as the caller.
3438 door_ki_create(void (*pc_cookie
)(), void *data_cookie
, uint_t attributes
,
3444 /* no DOOR_PRIVATE */
3445 if ((attributes
& ~DOOR_KI_CREATE_MASK
) ||
3446 (attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) ==
3447 (DOOR_UNREF
| DOOR_UNREF_MULTI
))
3450 err
= door_create_common(pc_cookie
, data_cookie
, attributes
,
3452 if (err
== 0 && (attributes
& (DOOR_UNREF
| DOOR_UNREF_MULTI
)) &&
3453 p0
.p_unref_thread
== 0) {
3454 /* need to create unref thread for process 0 */
3455 (void) thread_create(NULL
, 0, door_unref_kernel
, NULL
, 0, &p0
,
3456 TS_RUN
, minclsyspri
);
3465 door_ki_hold(door_handle_t dh
)
3467 file_t
*fp
= DHTOF(dh
);
3469 mutex_enter(&fp
->f_tlock
);
3471 mutex_exit(&fp
->f_tlock
);
3475 door_ki_rele(door_handle_t dh
)
3477 file_t
*fp
= DHTOF(dh
);
3483 door_ki_open(char *pathname
, door_handle_t
*dhp
)
3489 if ((err
= lookupname(pathname
, UIO_SYSSPACE
, FOLLOW
, NULL
, &vp
)) != 0)
3491 if (err
= fop_open(&vp
, FREAD
, kcred
, NULL
)) {
3495 if (vp
->v_type
!= VDOOR
) {
3499 if ((err
= falloc(vp
, FREAD
| FWRITE
, &fp
, NULL
)) != 0) {
3503 /* falloc returns with f_tlock held on success */
3504 mutex_exit(&fp
->f_tlock
);
3510 door_ki_info(door_handle_t dh
, struct door_info
*dip
)
3512 file_t
*fp
= DHTOF(dh
);
3515 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3517 if (vp
->v_type
!= VDOOR
)
3519 door_info_common(VTOD(vp
), dip
, fp
);
3524 door_ki_lookup(int did
)
3529 /* is the descriptor really a door? */
3530 if (door_lookup(did
, &fp
) == NULL
)
3532 /* got the door, put a hold on it and release the fd */
3540 door_ki_setparam(door_handle_t dh
, int type
, size_t val
)
3542 file_t
*fp
= DHTOF(dh
);
3545 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3547 if (vp
->v_type
!= VDOOR
)
3549 return (door_setparam_common(VTOD(vp
), 1, type
, val
));
3553 door_ki_getparam(door_handle_t dh
, int type
, size_t *out
)
3555 file_t
*fp
= DHTOF(dh
);
3558 if (fop_realvp(fp
->f_vnode
, &vp
, NULL
))
3560 if (vp
->v_type
!= VDOOR
)
3562 return (door_getparam_common(VTOD(vp
), type
, out
));