1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
28 * System call I/F to doors (outside of vnodes I/F) and misc support
29 * routines
31 #include <sys/types.h>
32 #include <sys/systm.h>
33 #include <sys/door.h>
34 #include <sys/door_data.h>
35 #include <sys/proc.h>
36 #include <sys/thread.h>
37 #include <sys/prsystm.h>
38 #include <sys/procfs.h>
39 #include <sys/class.h>
40 #include <sys/cred.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stack.h>
44 #include <sys/debug.h>
45 #include <sys/cpuvar.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/vnode.h>
49 #include <sys/vfs.h>
50 #include <sys/sobject.h>
51 #include <sys/schedctl.h>
52 #include <sys/callb.h>
53 #include <sys/ucred.h>
55 #include <sys/mman.h>
56 #include <sys/sysmacros.h>
57 #include <sys/vmsystm.h>
58 #include <vm/as.h>
59 #include <vm/hat.h>
60 #include <vm/page.h>
61 #include <vm/seg.h>
62 #include <vm/seg_vn.h>
64 #include <vm/seg_kpm.h>
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
72 * The maximum amount of data (in bytes) that will be transferred using
73 * an intermediate kernel buffer. For sizes greater than this we map
74 * in the destination pages and perform a 1-copy transfer.
76 size_t door_max_arg = 16 * 1024;
79 * Maximum amount of data that will be transferred in a reply to a
80 * door_upcall. Need to guard against a process returning huge amounts
81 * of data and getting the kernel stuck in kmem_alloc.
83 size_t door_max_upcall_reply = 1024 * 1024;
86 * Maximum number of descriptors allowed to be passed in a single
87 * door_call or door_return. We need to allocate kernel memory
88 * for all of them at once, so we can't let it scale without limit.
90 uint_t door_max_desc = 1024;
93 * Definition of a door handle, used by other kernel subsystems when
94 * calling door functions. This is really a file structure but we
95 * want to hide that fact.
97 struct __door_handle {
98 file_t dh_file;
101 #define DHTOF(dh) ((file_t *)(dh))
102 #define FTODH(fp) ((door_handle_t)(fp))
104 static int doorfs(long, long, long, long, long, long);
106 static struct sysent door_sysent = {
108 SE_ARGC | SE_NOUNLOAD,
109 (int (*)())doorfs,
112 static struct modlsys modlsys = {
113 &mod_syscallops, "doors", &door_sysent
116 #ifdef _SYSCALL32_IMPL
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120 int32_t arg5, int32_t subcode);
122 static struct sysent door_sysent32 = {
124 SE_ARGC | SE_NOUNLOAD,
125 (int (*)())doorfs32,
128 static struct modlsys modlsys32 = {
129 &mod_syscallops32,
130 "32-bit door syscalls",
131 &door_sysent32
133 #endif
135 static struct modlinkage modlinkage = {
136 MODREV_1,
137 &modlsys,
138 #ifdef _SYSCALL32_IMPL
139 &modlsys32,
140 #endif
141 NULL
144 dev_t doordev;
146 extern struct vfs door_vfs;
147 extern const struct vnodeops door_vnodeops;
149 /* yes, we want all defaults */
150 static const struct vfsops door_vfsops;
153 _init(void)
155 major_t major;
157 mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
158 if ((major = getudev()) == (major_t)-1)
159 return (ENXIO);
160 doordev = makedevice(major, 0);
162 VFS_INIT(&door_vfs, &door_vfsops, NULL);
163 door_vfs.vfs_flag = VFS_RDONLY;
164 door_vfs.vfs_dev = doordev;
165 vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
167 return (mod_install(&modlinkage));
171 _info(struct modinfo *modinfop)
173 return (mod_info(&modlinkage, modinfop));
176 /* system call functions */
177 static int door_call(int, void *);
178 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
179 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
180 uint_t), void *data_cookie, uint_t);
181 static int door_revoke(int);
182 static int door_info(int, struct door_info *);
183 static int door_ucred(struct ucred_s *);
184 static int door_bind(int);
185 static int door_unbind(void);
186 static int door_unref(void);
187 static int door_getparam(int, int, size_t *);
188 static int door_setparam(int, int, size_t);
190 #define DOOR_RETURN_OLD 4 /* historic value, for s10 */
193 * System call wrapper for all door related system calls
195 static int
196 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
198 switch (subcode) {
199 case DOOR_CALL:
200 return (door_call(arg1, (void *)arg2));
201 case DOOR_RETURN: {
202 door_return_desc_t *drdp = (door_return_desc_t *)arg3;
204 if (drdp != NULL) {
205 door_return_desc_t drd;
206 if (copyin(drdp, &drd, sizeof (drd)))
207 return (EFAULT);
208 return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
209 drd.desc_num, (caddr_t)arg4, arg5));
211 return (door_return((caddr_t)arg1, arg2, NULL,
212 0, (caddr_t)arg4, arg5));
214 case DOOR_RETURN_OLD:
216 * In order to support the S10 runtime environment, we
217 * still respond to the old syscall subcode for door_return.
218 * We treat it as having no stack limits. This code should
219 * be removed when such support is no longer needed.
221 return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
222 arg4, (caddr_t)arg5, 0));
223 case DOOR_CREATE:
224 return (door_create((void (*)())arg1, (void *)arg2, arg3));
225 case DOOR_REVOKE:
226 return (door_revoke(arg1));
227 case DOOR_INFO:
228 return (door_info(arg1, (struct door_info *)arg2));
229 case DOOR_BIND:
230 return (door_bind(arg1));
231 case DOOR_UNBIND:
232 return (door_unbind());
233 case DOOR_UNREFSYS:
234 return (door_unref());
235 case DOOR_UCRED:
236 return (door_ucred((struct ucred_s *)arg1));
237 case DOOR_GETPARAM:
238 return (door_getparam(arg1, arg2, (size_t *)arg3));
239 case DOOR_SETPARAM:
240 return (door_setparam(arg1, arg2, arg3));
241 default:
242 return (set_errno(EINVAL));
246 #ifdef _SYSCALL32_IMPL
248 * System call wrapper for all door related system calls from 32-bit programs.
249 * Needed at the moment because of the casts - they undo some damage
250 * that truss causes (sign-extending the stack pointer) when truss'ing
251 * a 32-bit program using doors.
253 static int
254 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
255 int32_t arg4, int32_t arg5, int32_t subcode)
257 switch (subcode) {
258 case DOOR_CALL:
259 return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
260 case DOOR_RETURN: {
261 door_return_desc32_t *drdp =
262 (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
263 if (drdp != NULL) {
264 door_return_desc32_t drd;
265 if (copyin(drdp, &drd, sizeof (drd)))
266 return (EFAULT);
267 return (door_return(
268 (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
269 (door_desc_t *)(uintptr_t)drd.desc_ptr,
270 drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
271 (size_t)(uintptr_t)(size32_t)arg5));
273 return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
274 arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
275 (size_t)(uintptr_t)(size32_t)arg5));
277 case DOOR_RETURN_OLD:
279 * In order to support the S10 runtime environment, we
280 * still respond to the old syscall subcode for door_return.
281 * We treat it as having no stack limits. This code should
282 * be removed when such support is no longer needed.
284 return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
285 (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
286 (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
287 case DOOR_CREATE:
288 return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
289 (void *)(uintptr_t)(caddr32_t)arg2, arg3));
290 case DOOR_REVOKE:
291 return (door_revoke(arg1));
292 case DOOR_INFO:
293 return (door_info(arg1,
294 (struct door_info *)(uintptr_t)(caddr32_t)arg2));
295 case DOOR_BIND:
296 return (door_bind(arg1));
297 case DOOR_UNBIND:
298 return (door_unbind());
299 case DOOR_UNREFSYS:
300 return (door_unref());
301 case DOOR_UCRED:
302 return (door_ucred(
303 (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
304 case DOOR_GETPARAM:
305 return (door_getparam(arg1, arg2,
306 (size_t *)(uintptr_t)(caddr32_t)arg3));
307 case DOOR_SETPARAM:
308 return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
310 default:
311 return (set_errno(EINVAL));
314 #endif
316 void shuttle_resume(kthread_t *, kmutex_t *);
317 void shuttle_swtch(kmutex_t *);
318 void shuttle_sleep(kthread_t *);
321 * Support routines
323 static int door_create_common(void (*)(), void *, uint_t, int, int *,
324 file_t **);
325 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
326 static int door_args(kthread_t *, int);
327 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
328 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
329 static void door_server_exit(proc_t *, kthread_t *);
330 static void door_release_server(door_node_t *, kthread_t *);
331 static kthread_t *door_get_server(door_node_t *);
332 static door_node_t *door_lookup(int, file_t **);
333 static int door_translate_in(void);
334 static int door_translate_out(void);
335 static void door_fd_rele(door_desc_t *, uint_t, int);
336 static void door_list_insert(door_node_t *);
337 static void door_info_common(door_node_t *, door_info_t *, file_t *);
338 static int door_release_fds(door_desc_t *, uint_t);
339 static void door_fd_close(door_desc_t *, uint_t);
340 static void door_fp_close(struct file **, uint_t);
342 static door_data_t *
343 door_my_data(int create_if_missing)
345 door_data_t *ddp;
347 ddp = curthread->t_door;
348 if (create_if_missing && ddp == NULL)
349 ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
351 return (ddp);
354 static door_server_t *
355 door_my_server(int create_if_missing)
357 door_data_t *ddp = door_my_data(create_if_missing);
359 return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
362 static door_client_t *
363 door_my_client(int create_if_missing)
365 door_data_t *ddp = door_my_data(create_if_missing);
367 return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
371 * System call to create a door
374 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
376 int fd;
377 int err;
379 if ((attributes & ~DOOR_CREATE_MASK) ||
380 ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
381 (DOOR_UNREF | DOOR_UNREF_MULTI)))
382 return (set_errno(EINVAL));
384 if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
385 &fd, NULL)) != 0)
386 return (set_errno(err));
388 f_setfd(fd, FD_CLOEXEC);
389 return (fd);
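/*
 * For illustration, a minimal user-level sketch of what arrives at the
 * door_create() system call above (the server procedure, cookie and
 * rendezvous path are hypothetical; see door_create(3C) and fattach(3C)).
 * Note that the returned descriptor already has FD_CLOEXEC set by the
 * code above.
 *
 *	#include <door.h>
 *	#include <stropts.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		(void) door_return(argp, arg_size, NULL, 0);	echo back
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		int did = door_create(my_servproc, NULL, DOOR_REFUSE_DESC);
 *
 *		(void) close(open("/tmp/mydoor", O_CREAT | O_RDWR, 0644));
 *		(void) fattach(did, "/tmp/mydoor");
 *		for (;;)
 *			pause();
 *	}
 */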
393 * Common code for creating user and kernel doors. If a door was
394 * created, stores a file structure pointer in the location pointed
395 * to by fpp (if fpp is non-NULL) and returns 0. Also, if a non-NULL
396 * pointer to a file descriptor is passed in as fdp, allocates a file
397 * descriptor representing the door. If a door could not be created,
398 * returns an error.
400 static int
401 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
402 int from_kernel, int *fdp, file_t **fpp)
404 door_node_t *dp;
405 vnode_t *vp;
406 struct file *fp;
407 static door_id_t index = 0;
408 proc_t *p = (from_kernel)? &p0 : curproc;
410 dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
412 dp->door_vnode = vn_alloc(KM_SLEEP);
413 dp->door_target = p;
414 dp->door_data = data_cookie;
415 dp->door_pc = pc_cookie;
416 dp->door_flags = attributes;
417 #ifdef _SYSCALL32_IMPL
418 if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
419 dp->door_data_max = UINT32_MAX;
420 else
421 #endif
422 dp->door_data_max = SIZE_MAX;
423 dp->door_data_min = 0UL;
424 dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
426 vp = DTOV(dp);
427 vn_setops(vp, &door_vnodeops);
428 vp->v_type = VDOOR;
429 vp->v_vfsp = &door_vfs;
430 vp->v_data = (caddr_t)dp;
431 mutex_enter(&door_knob);
432 dp->door_index = index++;
433 /* add to per-process door list */
434 door_list_insert(dp);
435 mutex_exit(&door_knob);
437 if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
439 * If the file table is full, remove the door from the
440 * per-process list, free the door, and return an error (EMFILE).
442 mutex_enter(&door_knob);
443 door_list_delete(dp);
444 mutex_exit(&door_knob);
445 vn_free(vp);
446 kmem_free(dp, sizeof (door_node_t));
447 return (EMFILE);
449 vn_exists(vp);
450 if (fdp != NULL)
451 setf(*fdp, fp);
452 mutex_exit(&fp->f_tlock);
454 if (fpp != NULL)
455 *fpp = fp;
456 return (0);
459 static int
460 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
462 ASSERT(MUTEX_HELD(&door_knob));
464 /* we allow unref upcalls through, despite any minimum */
465 if (da->data_size < dp->door_data_min &&
466 !(upcall && da->data_ptr == DOOR_UNREF_DATA))
467 return (ENOBUFS);
469 if (da->data_size > dp->door_data_max)
470 return (ENOBUFS);
472 if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
473 return (ENOTSUP);
475 if (da->desc_num > dp->door_desc_max)
476 return (ENFILE);
478 return (0);
482 * Door invocation.
485 door_call(int did, void *args)
487 /* Locals */
488 door_node_t *dp;
489 kthread_t *server_thread;
490 int error = 0;
491 klwp_t *lwp;
492 door_client_t *ct; /* curthread door_data */
493 door_server_t *st; /* server thread door_data */
494 door_desc_t *start = NULL;
495 uint_t ncopied = 0;
496 size_t dsize;
497 /* destructor for data returned by a kernel server */
498 void (*destfn)() = NULL;
499 void *destarg;
500 model_t datamodel;
501 int gotresults = 0;
502 int needcleanup = 0;
503 int cancel_pending;
505 lwp = ttolwp(curthread);
506 datamodel = lwp_getdatamodel(lwp);
508 ct = door_my_client(1);
511 * Get the arguments
513 if (args) {
514 if (datamodel == DATAMODEL_NATIVE) {
515 if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
516 return (set_errno(EFAULT));
517 } else {
518 door_arg32_t da32;
520 if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
521 return (set_errno(EFAULT));
522 ct->d_args.data_ptr =
523 (char *)(uintptr_t)da32.data_ptr;
524 ct->d_args.data_size = da32.data_size;
525 ct->d_args.desc_ptr =
526 (door_desc_t *)(uintptr_t)da32.desc_ptr;
527 ct->d_args.desc_num = da32.desc_num;
528 ct->d_args.rbuf =
529 (char *)(uintptr_t)da32.rbuf;
530 ct->d_args.rsize = da32.rsize;
532 } else {
533 /* No arguments, and no results allowed */
534 ct->d_noresults = 1;
535 ct->d_args.data_size = 0;
536 ct->d_args.desc_num = 0;
537 ct->d_args.rsize = 0;
540 if ((dp = door_lookup(did, NULL)) == NULL)
541 return (set_errno(EBADF));
544 * We don't want to hold the door FD over the entire operation;
545 * instead, we put a hold on the door vnode and release the FD
546 * immediately
548 VN_HOLD(DTOV(dp));
549 releasef(did);
552 * This should be done in shuttle_resume(), just before going to
553 * sleep, but we want to avoid overhead while holding door_knob.
554 * prstop() is just a no-op if we don't really go to sleep.
555 * We test not-kernel-address-space for the sake of clustering code.
557 if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
558 prstop(PR_REQUESTED, 0);
560 mutex_enter(&door_knob);
561 if (DOOR_INVALID(dp)) {
562 mutex_exit(&door_knob);
563 error = EBADF;
564 goto out;
568 * before we do anything, check that we are not overflowing the
569 * required limits.
571 error = door_check_limits(dp, &ct->d_args, 0);
572 if (error != 0) {
573 mutex_exit(&door_knob);
574 goto out;
578 * Check for in-kernel door server.
580 if (dp->door_target == &p0) {
581 caddr_t rbuf = ct->d_args.rbuf;
582 size_t rsize = ct->d_args.rsize;
584 dp->door_active++;
585 ct->d_kernel = 1;
586 ct->d_error = DOOR_WAIT;
587 mutex_exit(&door_knob);
588 /* translate file descriptors to vnodes */
589 if (ct->d_args.desc_num) {
590 error = door_translate_in();
591 if (error)
592 goto out;
595 * Call kernel door server. Arguments are passed and
596 * returned as a door_arg pointer. When called, data_ptr
597 * points to user data and desc_ptr points to a kernel list
598 * of door descriptors that have been converted to file
599 * structure pointers. It's the server function's
600 * responsibility to copyin the data pointed to by data_ptr
601 * (this avoids extra copying in some cases). On return,
602 * data_ptr points to a user buffer of data, and desc_ptr
603 * points to a kernel list of door descriptors representing
604 * files. When a reference is passed to a kernel server,
605 * it is the server's responsibility to release the reference
606 * (by calling closef). When the server includes a
607 * reference in its reply, it is released as part of the
608 * call (the server must duplicate the reference if
609 * it wants to retain a copy). The destfn, if set to
610 * non-NULL, is a destructor to be called when the returned
611 * kernel data (if any) is no longer needed (has all been
612 * translated and copied to user level).
614 (*(dp->door_pc))(dp->door_data, &ct->d_args,
615 &destfn, &destarg, &error);
616 mutex_enter(&door_knob);
617 /* not implemented yet */
618 if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
619 door_deliver_unref(dp);
620 mutex_exit(&door_knob);
621 if (error)
622 goto out;
624 /* translate vnodes to files */
625 if (ct->d_args.desc_num) {
626 error = door_translate_out();
627 if (error)
628 goto out;
630 ct->d_buf = ct->d_args.rbuf;
631 ct->d_bufsize = ct->d_args.rsize;
632 if (rsize < (ct->d_args.data_size +
633 (ct->d_args.desc_num * sizeof (door_desc_t)))) {
634 /* handle overflow */
635 error = door_overflow(curthread, ct->d_args.data_ptr,
636 ct->d_args.data_size, ct->d_args.desc_ptr,
637 ct->d_args.desc_num);
638 if (error)
639 goto out;
640 /* door_overflow sets d_args rbuf and rsize */
641 } else {
642 ct->d_args.rbuf = rbuf;
643 ct->d_args.rsize = rsize;
645 goto results;
649 * Get a server thread from the target domain
651 if ((server_thread = door_get_server(dp)) == NULL) {
652 if (DOOR_INVALID(dp))
653 error = EBADF;
654 else
655 error = EAGAIN;
656 mutex_exit(&door_knob);
657 goto out;
660 st = DOOR_SERVER(server_thread->t_door);
661 if (ct->d_args.desc_num || ct->d_args.data_size) {
662 int is_private = (dp->door_flags & DOOR_PRIVATE);
664 * Move data from client to server
666 DOOR_T_HOLD(st);
667 mutex_exit(&door_knob);
668 error = door_args(server_thread, is_private);
669 mutex_enter(&door_knob);
670 DOOR_T_RELEASE(st);
671 if (error) {
673 * We're not going to resume this thread after all
675 door_release_server(dp, server_thread);
676 shuttle_sleep(server_thread);
677 mutex_exit(&door_knob);
678 goto out;
682 dp->door_active++;
683 ct->d_error = DOOR_WAIT;
684 ct->d_args_done = 0;
685 st->d_caller = curthread;
686 st->d_active = dp;
688 shuttle_resume(server_thread, &door_knob);
690 mutex_enter(&door_knob);
691 shuttle_return:
692 if ((error = ct->d_error) < 0) { /* DOOR_WAIT or DOOR_EXIT */
694 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
696 mutex_exit(&door_knob); /* May block in ISSIG */
697 cancel_pending = 0;
698 if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
699 MUSTRETURN(curproc, curthread) ||
700 (cancel_pending = schedctl_cancel_pending()) != 0) {
701 /* Signal, forkall, ... */
702 lwp->lwp_sysabort = 0;
703 if (cancel_pending)
704 schedctl_cancel_eintr();
705 mutex_enter(&door_knob);
706 error = EINTR;
708 * If the server has finished processing our call,
709 * or exited (calling door_slam()), then d_error
710 * will have changed. If the server hasn't finished
711 * yet, d_error will still be DOOR_WAIT, and we
712 * let it know we are not interested in any
713 * results by sending a SIGCANCEL, unless the door
714 * is marked with DOOR_NO_CANCEL.
716 if (ct->d_error == DOOR_WAIT &&
717 st->d_caller == curthread) {
718 proc_t *p = ttoproc(server_thread);
720 st->d_active = NULL;
721 st->d_caller = NULL;
723 if (!(dp->door_flags & DOOR_NO_CANCEL)) {
724 DOOR_T_HOLD(st);
725 mutex_exit(&door_knob);
727 mutex_enter(&p->p_lock);
728 sigtoproc(p, server_thread, SIGCANCEL);
729 mutex_exit(&p->p_lock);
731 mutex_enter(&door_knob);
732 DOOR_T_RELEASE(st);
735 } else {
737 * Return from stop(), server exit...
739 * Note that the server could have done a
740 * door_return while the client was in stop state
741 * (ISSIG), in which case the error condition
742 * is updated by the server.
744 mutex_enter(&door_knob);
745 if (ct->d_error == DOOR_WAIT) {
746 /* Still waiting for a reply */
747 shuttle_swtch(&door_knob);
748 mutex_enter(&door_knob);
749 lwp->lwp_asleep = 0;
750 goto shuttle_return;
751 } else if (ct->d_error == DOOR_EXIT) {
752 /* Server exit */
753 error = EINTR;
754 } else {
755 /* Server did a door_return during ISSIG */
756 error = ct->d_error;
760 * Can't exit if the server is currently copying
761 * results for me.
763 while (DOOR_T_HELD(ct))
764 cv_wait(&ct->d_cv, &door_knob);
767 * If the server has not processed our message, free the
768 * descriptors.
770 if (!ct->d_args_done) {
771 needcleanup = 1;
772 ct->d_args_done = 1;
776 * Find out if results were successfully copied.
778 if (ct->d_error == 0)
779 gotresults = 1;
781 ASSERT(ct->d_args_done);
782 lwp->lwp_asleep = 0; /* /proc */
783 lwp->lwp_sysabort = 0; /* /proc */
784 if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
785 door_deliver_unref(dp);
786 mutex_exit(&door_knob);
788 if (needcleanup)
789 door_fp_close(ct->d_fpp, ct->d_args.desc_num);
791 results:
793 * Move the results to userland (if any)
796 if (ct->d_noresults)
797 goto out;
799 if (error) {
801 * If server returned results successfully, then we've
802 * been interrupted and may need to clean up.
804 if (gotresults) {
805 ASSERT(error == EINTR);
806 door_fp_close(ct->d_fpp, ct->d_args.desc_num);
808 goto out;
812 * Copy back data if we haven't caused an overflow (already
813 * handled) and we are using a 2 copy transfer, or we are
814 * returning data from a kernel server.
816 if (ct->d_args.data_size) {
817 ct->d_args.data_ptr = ct->d_args.rbuf;
818 if (ct->d_kernel || (!ct->d_overflow &&
819 ct->d_args.data_size <= door_max_arg)) {
820 if (copyout_nowatch(ct->d_buf, ct->d_args.rbuf,
821 ct->d_args.data_size)) {
822 door_fp_close(ct->d_fpp, ct->d_args.desc_num);
823 error = EFAULT;
824 goto out;
830 * stuff returned doors into our proc, copyout the descriptors
832 if (ct->d_args.desc_num) {
833 struct file **fpp;
834 door_desc_t *didpp;
835 uint_t n = ct->d_args.desc_num;
837 dsize = n * sizeof (door_desc_t);
838 start = didpp = kmem_alloc(dsize, KM_SLEEP);
839 fpp = ct->d_fpp;
841 while (n--) {
842 if (door_insert(*fpp, didpp) == -1) {
843 /* Close remaining files */
844 door_fp_close(fpp, n + 1);
845 error = EMFILE;
846 goto out;
848 fpp++; didpp++; ncopied++;
851 ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
852 roundup(ct->d_args.data_size, sizeof (door_desc_t)));
854 if (copyout_nowatch(start, ct->d_args.desc_ptr, dsize)) {
855 error = EFAULT;
856 goto out;
861 * Return the results
863 if (datamodel == DATAMODEL_NATIVE) {
864 if (copyout_nowatch(&ct->d_args, args,
865 sizeof (door_arg_t)) != 0)
866 error = EFAULT;
867 } else {
868 door_arg32_t da32;
870 da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
871 da32.data_size = ct->d_args.data_size;
872 da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
873 da32.desc_num = ct->d_args.desc_num;
874 da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
875 da32.rsize = ct->d_args.rsize;
876 if (copyout_nowatch(&da32, args, sizeof (door_arg32_t)) != 0) {
877 error = EFAULT;
881 out:
882 ct->d_noresults = 0;
884 /* clean up the overflow buffer if an error occurred */
885 if (error != 0 && ct->d_overflow) {
886 (void) as_unmap(curproc->p_as, ct->d_args.rbuf,
887 ct->d_args.rsize);
889 ct->d_overflow = 0;
891 /* call destructor */
892 if (destfn) {
893 ASSERT(ct->d_kernel);
894 (*destfn)(dp->door_data, destarg);
895 ct->d_buf = NULL;
896 ct->d_bufsize = 0;
899 if (dp)
900 VN_RELE(DTOV(dp));
902 if (ct->d_buf) {
903 ASSERT(!ct->d_kernel);
904 kmem_free(ct->d_buf, ct->d_bufsize);
905 ct->d_buf = NULL;
906 ct->d_bufsize = 0;
908 ct->d_kernel = 0;
910 /* clean up the descriptor copyout buffer */
911 if (start != NULL) {
912 if (error != 0)
913 door_fd_close(start, ncopied);
914 kmem_free(start, dsize);
917 if (ct->d_fpp) {
918 kmem_free(ct->d_fpp, ct->d_fpp_size);
919 ct->d_fpp = NULL;
920 ct->d_fpp_size = 0;
923 if (error)
924 return (set_errno(error));
926 return (0);
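/*
 * For illustration, the client half of the exchange above, roughly as it
 * would appear inside a user-level client function (path and payload are
 * hypothetical; see door_call(3C)).  If the reply did not fit in rbuf,
 * door_overflow() below has mapped a new buffer into the caller's address
 * space and rbuf/rsize now describe it, so the client must munmap() it.
 *
 *	#include <door.h>
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	char req[] = "hello", reply[4096];
 *	door_arg_t da;
 *	int did = open("/tmp/mydoor", O_RDONLY);
 *
 *	da.data_ptr = req;
 *	da.data_size = sizeof (req);
 *	da.desc_ptr = NULL;
 *	da.desc_num = 0;
 *	da.rbuf = reply;
 *	da.rsize = sizeof (reply);
 *
 *	if (door_call(did, &da) == 0) {
 *		if (da.rbuf != reply)		(overflow reply)
 *			(void) munmap(da.rbuf, da.rsize);
 *	}
 */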
929 static int
930 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
932 int error = 0;
934 mutex_enter(&door_knob);
936 if (DOOR_INVALID(dp)) {
937 mutex_exit(&door_knob);
938 return (EBADF);
942 * door_ki_setparam() can only affect kernel doors.
943 * door_setparam() can only affect doors attached to the current
944 * process.
946 if ((from_kernel && dp->door_target != &p0) ||
947 (!from_kernel && dp->door_target != curproc)) {
948 mutex_exit(&door_knob);
949 return (EPERM);
952 switch (type) {
953 case DOOR_PARAM_DESC_MAX:
954 if (val > INT_MAX)
955 error = ERANGE;
956 else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
957 error = ENOTSUP;
958 else
959 dp->door_desc_max = (uint_t)val;
960 break;
962 case DOOR_PARAM_DATA_MIN:
963 if (val > dp->door_data_max)
964 error = EINVAL;
965 else
966 dp->door_data_min = val;
967 break;
969 case DOOR_PARAM_DATA_MAX:
970 if (val < dp->door_data_min)
971 error = EINVAL;
972 else
973 dp->door_data_max = val;
974 break;
976 default:
977 error = EINVAL;
978 break;
981 mutex_exit(&door_knob);
982 return (error);
985 static int
986 door_getparam_common(door_node_t *dp, int type, size_t *out)
988 int error = 0;
990 mutex_enter(&door_knob);
991 switch (type) {
992 case DOOR_PARAM_DESC_MAX:
993 *out = (size_t)dp->door_desc_max;
994 break;
995 case DOOR_PARAM_DATA_MIN:
996 *out = dp->door_data_min;
997 break;
998 case DOOR_PARAM_DATA_MAX:
999 *out = dp->door_data_max;
1000 break;
1001 default:
1002 error = EINVAL;
1003 break;
1005 mutex_exit(&door_knob);
1006 return (error);
1010 door_setparam(int did, int type, size_t val)
1012 door_node_t *dp;
1013 int error = 0;
1015 if ((dp = door_lookup(did, NULL)) == NULL)
1016 return (set_errno(EBADF));
1018 error = door_setparam_common(dp, 0, type, val);
1020 releasef(did);
1022 if (error)
1023 return (set_errno(error));
1025 return (0);
1029 door_getparam(int did, int type, size_t *out)
1031 door_node_t *dp;
1032 size_t val = 0;
1033 int error = 0;
1035 if ((dp = door_lookup(did, NULL)) == NULL)
1036 return (set_errno(EBADF));
1038 error = door_getparam_common(dp, type, &val);
1040 releasef(did);
1042 if (error)
1043 return (set_errno(error));
1045 if (get_udatamodel() == DATAMODEL_NATIVE) {
1046 if (copyout(&val, out, sizeof (val)))
1047 return (set_errno(EFAULT));
1048 #ifdef _SYSCALL32_IMPL
1049 } else {
1050 size32_t val32 = (size32_t)val;
1052 if (val != val32)
1053 return (set_errno(EOVERFLOW));
1055 if (copyout(&val32, out, sizeof (val32)))
1056 return (set_errno(EFAULT));
1057 #endif /* _SYSCALL32_IMPL */
1060 return (0);
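/*
 * For illustration, how a server would use these parameters from user
 * level (see door_setparam(3C)); the values and my_req_t are hypothetical,
 * and the calls succeed only on doors created by the calling process
 * (EPERM otherwise, as enforced in door_setparam_common() above).  The
 * limits themselves are enforced by door_check_limits() above: a request
 * outside [DOOR_PARAM_DATA_MIN, DOOR_PARAM_DATA_MAX] fails with ENOBUFS,
 * and one passing more than DOOR_PARAM_DESC_MAX descriptors fails with
 * ENFILE (or ENOTSUP if the door was created with DOOR_REFUSE_DESC).
 *
 *	size_t cur;
 *
 *	(void) door_setparam(did, DOOR_PARAM_DATA_MIN, sizeof (my_req_t));
 *	(void) door_setparam(did, DOOR_PARAM_DATA_MAX, sizeof (my_req_t));
 *	(void) door_setparam(did, DOOR_PARAM_DESC_MAX, 0);
 *	(void) door_getparam(did, DOOR_PARAM_DATA_MAX, &cur);
 */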
1064 * A copyout() which proceeds from high addresses to low addresses. This way,
1065 * stack guard pages are effective.
1067 * Note that we use copyout_nowatch(); this is called while the client is
1068 * held.
1070 static int
1071 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1073 const char *kbase = (const char *)kaddr;
1074 uintptr_t ubase = (uintptr_t)uaddr;
1075 size_t pgsize = PAGESIZE;
1077 if (count <= pgsize)
1078 return (copyout_nowatch(kaddr, uaddr, count));
1080 while (count > 0) {
1081 uintptr_t start, end, offset, amount;
1083 end = ubase + count;
1084 start = P2ALIGN(end - 1, pgsize);
1085 if (P2ALIGN(ubase, pgsize) == start)
1086 start = ubase;
1088 offset = start - ubase;
1089 amount = end - start;
1091 ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1093 if (copyout_nowatch(kbase + offset, (void *)start, amount))
1094 return (1);
1095 count -= amount;
1097 return (0);
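/*
 * As a worked example, with PAGESIZE 0x1000, uaddr 0x10f80 and count
 * 0x2100, the loop above copies (from high addresses to low):
 * [0x13000, 0x13080), then [0x12000, 0x13000), then [0x11000, 0x12000),
 * and finally [0x10f80, 0x11000).  The highest, already valid, part of
 * the stack is written first and lower pages only afterwards.
 */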
1101 * Writes the stack layout for door_return() into the door_server_t of the
1102 * server thread.
1104 static int
1105 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1107 door_server_t *st = DOOR_SERVER(tp->t_door);
1108 door_layout_t *out = &st->d_layout;
1109 uintptr_t base_sp = (uintptr_t)st->d_sp;
1110 size_t ssize = st->d_ssize;
1111 size_t descsz;
1112 uintptr_t descp, datap, infop, resultsp, finalsp;
1113 size_t align = STACK_ALIGN;
1114 size_t results_sz = sizeof (struct door_results);
1115 model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1117 ASSERT(!st->d_layout_done);
1119 #ifndef _STACK_GROWS_DOWNWARD
1120 #error stack does not grow downward, door_layout() must change
1121 #endif
1123 #ifdef _SYSCALL32_IMPL
1124 if (datamodel != DATAMODEL_NATIVE) {
1125 align = STACK_ALIGN32;
1126 results_sz = sizeof (struct door_results32);
1128 #endif
1130 descsz = ndesc * sizeof (door_desc_t);
1133 * To speed up the overflow checking, we do an initial check
1134 * that the passed in data size won't cause us to wrap past
1135 * base_sp. Since door_max_desc limits descsz, we can
1136 * safely use it here. 65535 is an arbitrary 'bigger than
1137 * we need, small enough to not cause trouble' constant;
1138 * the only constraint is that it must be > than:
1140 * 5 * STACK_ALIGN +
1141 * sizeof (door_info_t) +
1142 * sizeof (door_results_t) +
1143 * (max adjustment from door_final_sp())
1145 * After we compute the layout, we can safely do a "did we wrap
1146 * around" check, followed by a check against the recorded
1147 * stack size.
1149 if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1150 return (E2BIG); /* overflow */
1152 descp = P2ALIGN(base_sp - descsz, align);
1153 datap = P2ALIGN(descp - data_size, align);
1155 if (info_needed)
1156 infop = P2ALIGN(datap - sizeof (door_info_t), align);
1157 else
1158 infop = datap;
1160 resultsp = P2ALIGN(infop - results_sz, align);
1161 finalsp = door_final_sp(resultsp, align, datamodel);
1163 if (finalsp > base_sp)
1164 return (E2BIG); /* overflow */
1166 if (ssize != 0 && (base_sp - finalsp) > ssize)
1167 return (E2BIG); /* doesn't fit in stack */
1169 out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1170 out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1171 out->dl_infop = info_needed? (caddr_t)infop : 0;
1172 out->dl_resultsp = (caddr_t)resultsp;
1173 out->dl_sp = (caddr_t)finalsp;
1175 st->d_layout_done = 1;
1176 return (0);
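/*
 * The resulting layout, from the server's stack base downward (each
 * region is aligned to 'align'):
 *
 *	d_sp (base_sp)	door_desc_t[ndesc]			dl_descp
 *			argument data				dl_datap
 *			door_info_t (only if info_needed)	dl_infop
 *			struct door_results			dl_resultsp
 *			final sp after door_final_sp()		dl_sp
 */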
1179 static int
1180 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1182 door_server_t *st = DOOR_SERVER(curthread->t_door);
1183 door_layout_t *layout = &st->d_layout;
1184 int error = 0;
1186 int is_private = (dp->door_flags & DOOR_PRIVATE);
1188 door_pool_t *pool = (is_private)? &dp->door_servers :
1189 &curproc->p_server_threads;
1191 int empty_pool = (pool->dp_threads == NULL);
1193 caddr_t infop = NULL;
1194 char *datap = NULL;
1195 size_t datasize = 0;
1196 size_t descsize;
1198 file_t **fpp = ct->d_fpp;
1199 door_desc_t *start = NULL;
1200 uint_t ndesc = 0;
1201 uint_t ncopied = 0;
1203 if (ct != NULL) {
1204 datap = ct->d_args.data_ptr;
1205 datasize = ct->d_args.data_size;
1206 ndesc = ct->d_args.desc_num;
1209 descsize = ndesc * sizeof (door_desc_t);
1212 * Reset datap to NULL if we aren't passing any data. Be careful
1213 * to let unref notifications through, though.
1215 if (datap == DOOR_UNREF_DATA) {
1216 if (ct->d_upcall != NULL)
1217 datasize = 0;
1218 else
1219 datap = NULL;
1220 } else if (datasize == 0) {
1221 datap = NULL;
1225 * Get the stack layout, if it hasn't already been done.
1227 if (!st->d_layout_done) {
1228 error = door_layout(curthread, datasize, ndesc,
1229 (is_private && empty_pool));
1230 if (error != 0)
1231 goto fail;
1235 * fill out the stack, starting from the top. Layout was already
1236 * filled in by door_args() or door_translate_out().
1238 if (layout->dl_descp != NULL) {
1239 ASSERT(ndesc != 0);
1240 start = kmem_alloc(descsize, KM_SLEEP);
1242 while (ndesc > 0) {
1243 if (door_insert(*fpp, &start[ncopied]) == -1) {
1244 error = EMFILE;
1245 goto fail;
1247 ndesc--;
1248 ncopied++;
1249 fpp++;
1251 if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1252 error = E2BIG;
1253 goto fail;
1256 fpp = NULL; /* finished processing */
1258 if (layout->dl_datap != NULL) {
1259 ASSERT(datasize != 0);
1260 datap = layout->dl_datap;
1261 if (ct->d_upcall != NULL || datasize <= door_max_arg) {
1262 if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1263 error = E2BIG;
1264 goto fail;
1269 if (is_private && empty_pool) {
1270 door_info_t di;
1272 infop = layout->dl_infop;
1273 ASSERT(infop != NULL);
1275 di.di_target = curproc->p_pid;
1276 di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1277 di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1278 di.di_uniquifier = dp->door_index;
1279 di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1280 DOOR_LOCAL;
1282 if (door_stack_copyout(&di, infop, sizeof (di))) {
1283 error = E2BIG;
1284 goto fail;
1288 if (get_udatamodel() == DATAMODEL_NATIVE) {
1289 struct door_results dr;
1291 dr.cookie = dp->door_data;
1292 dr.data_ptr = datap;
1293 dr.data_size = datasize;
1294 dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1295 dr.desc_num = ncopied;
1296 dr.pc = dp->door_pc;
1297 dr.nservers = !empty_pool;
1298 dr.door_info = (door_info_t *)infop;
1300 if (door_stack_copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1301 error = E2BIG;
1302 goto fail;
1304 #ifdef _SYSCALL32_IMPL
1305 } else {
1306 struct door_results32 dr32;
1308 dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1309 dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1310 dr32.data_size = (size32_t)datasize;
1311 dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1312 dr32.desc_num = ncopied;
1313 dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1314 dr32.nservers = !empty_pool;
1315 dr32.door_info = (caddr32_t)(uintptr_t)infop;
1317 if (door_stack_copyout(&dr32, layout->dl_resultsp,
1318 sizeof (dr32))) {
1319 error = E2BIG;
1320 goto fail;
1322 #endif
1325 error = door_finish_dispatch(layout->dl_sp);
1326 fail:
1327 if (start != NULL) {
1328 if (error != 0)
1329 door_fd_close(start, ncopied);
1330 kmem_free(start, descsize);
1332 if (fpp != NULL)
1333 door_fp_close(fpp, ndesc);
1335 return (error);
1339 * Return the results (if any) to the caller (if any) and wait for the
1340 * next invocation on a door.
1343 door_return(caddr_t data_ptr, size_t data_size,
1344 door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1346 kthread_t *caller;
1347 klwp_t *lwp;
1348 int error = 0;
1349 door_node_t *dp;
1350 door_server_t *st; /* curthread door_data */
1351 door_client_t *ct; /* caller door_data */
1352 int cancel_pending;
1354 st = door_my_server(1);
1357 * If thread was bound to a door that no longer exists, return
1358 * an error. This can happen if a thread is bound to a door
1359 * before the process calls forkall(); in the child, the door
1360 * doesn't exist and door_fork() sets the d_invbound flag.
1362 if (st->d_invbound)
1363 return (set_errno(EINVAL));
1365 st->d_sp = sp; /* Save base of stack. */
1366 st->d_ssize = ssize; /* and its size */
1369 * This should be done in shuttle_resume(), just before going to
1370 * sleep, but we want to avoid overhead while holding door_knob.
1371 * prstop() is just a no-op if we don't really go to sleep.
1372 * We test not-kernel-address-space for the sake of clustering code.
1374 lwp = ttolwp(curthread);
1375 if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
1376 prstop(PR_REQUESTED, 0);
1378 /* Make sure the caller hasn't gone away */
1379 mutex_enter(&door_knob);
1380 if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1381 if (desc_num != 0) {
1382 /* close any DOOR_RELEASE descriptors */
1383 mutex_exit(&door_knob);
1384 error = door_release_fds(desc_ptr, desc_num);
1385 if (error)
1386 return (set_errno(error));
1387 mutex_enter(&door_knob);
1389 goto out;
1391 ct = DOOR_CLIENT(caller->t_door);
1393 ct->d_args.data_size = data_size;
1394 ct->d_args.desc_num = desc_num;
1396 * Transfer results, if any, to the client
1398 if (data_size != 0 || desc_num != 0) {
1400 * Prevent the client from exiting until we have finished
1401 * moving results.
1403 DOOR_T_HOLD(ct);
1404 mutex_exit(&door_knob);
1405 error = door_results(caller, data_ptr, data_size,
1406 desc_ptr, desc_num);
1407 mutex_enter(&door_knob);
1408 DOOR_T_RELEASE(ct);
1410 * Pass EOVERFLOW errors back to the client
1412 if (error && error != EOVERFLOW) {
1413 mutex_exit(&door_knob);
1414 return (set_errno(error));
1417 out:
1418 /* Put ourselves on the available server thread list */
1419 door_release_server(st->d_pool, curthread);
1422 * Make sure the caller is still waiting to be resumed
1424 if (caller) {
1425 disp_lock_t *tlp;
1427 thread_lock(caller);
1428 ct->d_error = error; /* Return any errors */
1429 if (caller->t_state == TS_SLEEP &&
1430 SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1431 cpu_t *cp = CPU;
1433 tlp = caller->t_lockp;
1435 * Setting t_disp_queue prevents erroneous preemptions
1436 * if this thread is still in execution on another
1437 * processor
1439 caller->t_disp_queue = cp->cpu_disp;
1440 CL_ACTIVE(caller);
1442 * We are calling thread_onproc() instead of
1443 * THREAD_ONPROC() because compiler can reorder
1444 * the two stores of t_state and t_lockp in
1445 * THREAD_ONPROC().
1447 thread_onproc(caller, cp);
1448 disp_lock_exit_high(tlp);
1449 shuttle_resume(caller, &door_knob);
1450 } else {
1451 /* May have been setrun or in stop state */
1452 thread_unlock(caller);
1453 shuttle_swtch(&door_knob);
1455 } else {
1456 shuttle_swtch(&door_knob);
1460 * We've sprung to life. Determine if we are part of a door
1461 * invocation, or just interrupted
1463 mutex_enter(&door_knob);
1464 if ((dp = st->d_active) != NULL) {
1466 * Normal door invocation. Return any error condition
1467 * encountered while trying to pass args to the server
1468 * thread.
1470 lwp->lwp_asleep = 0;
1472 * Prevent the caller from leaving us while we
1473 * are copying out the arguments from its buffer.
1475 ASSERT(st->d_caller != NULL);
1476 ct = DOOR_CLIENT(st->d_caller->t_door);
1478 DOOR_T_HOLD(ct);
1479 mutex_exit(&door_knob);
1480 error = door_server_dispatch(ct, dp);
1481 mutex_enter(&door_knob);
1482 DOOR_T_RELEASE(ct);
1484 /* let the client know we have processed its message */
1485 ct->d_args_done = 1;
1487 if (error) {
1488 caller = st->d_caller;
1489 if (caller)
1490 ct = DOOR_CLIENT(caller->t_door);
1491 else
1492 ct = NULL;
1493 goto out;
1495 mutex_exit(&door_knob);
1496 return (0);
1497 } else {
1499 * We are not involved in a door_invocation.
1500 * Check for /proc related activity...
1502 st->d_caller = NULL;
1503 door_server_exit(curproc, curthread);
1504 mutex_exit(&door_knob);
1505 cancel_pending = 0;
1506 if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
1507 MUSTRETURN(curproc, curthread) ||
1508 (cancel_pending = schedctl_cancel_pending()) != 0) {
1509 if (cancel_pending)
1510 schedctl_cancel_eintr();
1511 lwp->lwp_asleep = 0;
1512 lwp->lwp_sysabort = 0;
1513 return (set_errno(EINTR));
1515 /* Go back and wait for another request */
1516 lwp->lwp_asleep = 0;
1517 mutex_enter(&door_knob);
1518 caller = NULL;
1519 goto out;
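/*
 * For illustration, the user-level view of the code above: a server
 * procedure hands its results to door_return(3C), which does not return
 * on success (the stack pointer and size seen by the kernel-level
 * door_return() above are supplied by the user-level door library, not
 * by the application).  The reply contents here are hypothetical.
 *
 *	static void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		char res[] = "ok";
 *
 *		(void) door_return(res, sizeof (res), NULL, 0);
 *		not reached unless door_return() failed
 *	}
 */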
1524 * Revoke any future invocations on this door
1527 door_revoke(int did)
1529 door_node_t *d;
1530 int error;
1532 if ((d = door_lookup(did, NULL)) == NULL)
1533 return (set_errno(EBADF));
1535 mutex_enter(&door_knob);
1536 if (d->door_target != curproc) {
1537 mutex_exit(&door_knob);
1538 releasef(did);
1539 return (set_errno(EPERM));
1541 d->door_flags |= DOOR_REVOKED;
1542 if (d->door_flags & DOOR_PRIVATE)
1543 cv_broadcast(&d->door_servers.dp_cv);
1544 else
1545 cv_broadcast(&curproc->p_server_threads.dp_cv);
1546 mutex_exit(&door_knob);
1547 releasef(did);
1548 /* Invalidate the descriptor */
1549 if ((error = closeandsetf(did, NULL)) != 0)
1550 return (set_errno(error));
1551 return (0);
1555 door_info(int did, struct door_info *d_info)
1557 door_node_t *dp;
1558 door_info_t di;
1559 door_server_t *st;
1560 file_t *fp = NULL;
1562 if (did == DOOR_QUERY) {
1563 /* Get information on door current thread is bound to */
1564 if ((st = door_my_server(0)) == NULL ||
1565 (dp = st->d_pool) == NULL)
1566 /* Thread isn't bound to a door */
1567 return (set_errno(EBADF));
1568 } else if ((dp = door_lookup(did, &fp)) == NULL) {
1569 /* Not a door */
1570 return (set_errno(EBADF));
1573 door_info_common(dp, &di, fp);
1575 if (did != DOOR_QUERY)
1576 releasef(did);
1578 if (copyout(&di, d_info, sizeof (struct door_info)))
1579 return (set_errno(EFAULT));
1580 return (0);
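/*
 * For illustration, the user-level consumer of this call (see
 * door_info(3C)); 'did' is a door descriptor obtained elsewhere.
 *
 *	struct door_info di;
 *
 *	if (door_info(did, &di) == 0 &&
 *	    (di.di_attributes & DOOR_IS_UNREF))
 *		no other references to this door exist
 *
 * di_target is the server's pid (-1 once the door has been revoked or its
 * server has exited), and di_uniquifier identifies this particular door.
 */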
1584 * Common code for getting information about a door either via the
1585 * door_info system call or the door_ki_info kernel call.
1587 void
1588 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1590 int unref_count;
1592 bzero(dip, sizeof (door_info_t));
1594 mutex_enter(&door_knob);
1595 if (dp->door_target == NULL)
1596 dip->di_target = -1;
1597 else
1598 dip->di_target = dp->door_target->p_pid;
1600 dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1601 if (dp->door_target == curproc)
1602 dip->di_attributes |= DOOR_LOCAL;
1603 dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1604 dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1605 dip->di_uniquifier = dp->door_index;
1607 * If this door is in the middle of having an unreferenced
1608 * notification delivered, don't count the VN_HOLD by
1609 * door_deliver_unref in determining if it is unreferenced.
1610 * This handles the case where door_info is called from the
1611 * thread delivering the unref notification.
1613 if (dp->door_flags & DOOR_UNREF_ACTIVE)
1614 unref_count = 2;
1615 else
1616 unref_count = 1;
1617 mutex_exit(&door_knob);
1619 if (fp == NULL) {
1621 * If this thread is bound to the door, then we can just
1622 * check the vnode; a ref count of 1 (or 2 if this is
1623 * handling an unref notification) means that the hold
1624 * from the door_bind is the only reference to the door
1625 * (no file descriptor refers to it).
1627 if (DTOV(dp)->v_count == unref_count)
1628 dip->di_attributes |= DOOR_IS_UNREF;
1629 } else {
1631 * If we're working from a file descriptor or door handle
1632 * we need to look at the file structure count. We don't
1633 * need to hold the vnode lock since this is just a snapshot.
1635 mutex_enter(&fp->f_tlock);
1636 if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1637 dip->di_attributes |= DOOR_IS_UNREF;
1638 mutex_exit(&fp->f_tlock);
1643 * Return credentials of the door caller (if any) for this invocation
1646 door_ucred(struct ucred_s *uch)
1648 kthread_t *caller;
1649 door_server_t *st;
1650 door_client_t *ct;
1651 door_upcall_t *dup;
1652 struct proc *p;
1653 struct ucred_s *res;
1654 int err;
1656 mutex_enter(&door_knob);
1657 if ((st = door_my_server(0)) == NULL ||
1658 (caller = st->d_caller) == NULL) {
1659 mutex_exit(&door_knob);
1660 return (set_errno(EINVAL));
1663 ASSERT(caller->t_door != NULL);
1664 ct = DOOR_CLIENT(caller->t_door);
1666 /* Prevent caller from exiting while we examine the cred */
1667 DOOR_T_HOLD(ct);
1668 mutex_exit(&door_knob);
1670 p = ttoproc(caller);
1673 * If the credentials are not specified by the client, get the one
1674 * associated with the calling process.
1676 if ((dup = ct->d_upcall) != NULL)
1677 res = cred2ucred(dup->du_cred, p0.p_pid, NULL, CRED());
1678 else
1679 res = cred2ucred(caller->t_cred, p->p_pid, NULL, CRED());
1681 mutex_enter(&door_knob);
1682 DOOR_T_RELEASE(ct);
1683 mutex_exit(&door_knob);
1685 err = copyout(res, uch, res->uc_size);
1687 kmem_free(res, res->uc_size);
1689 if (err != 0)
1690 return (set_errno(EFAULT));
1692 return (0);
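/*
 * For illustration, the matching user-level call inside a server
 * procedure (see door_ucred(3C)); it fails with EINVAL when the calling
 * thread is not currently servicing a door invocation, as enforced above.
 * How the credentials are used afterwards is up to the server.
 *
 *	#include <door.h>
 *	#include <ucred.h>
 *
 *	ucred_t *uc = NULL;
 *
 *	if (door_ucred(&uc) == 0) {
 *		uid_t uid = ucred_geteuid(uc);
 *		pid_t pid = ucred_getpid(uc);
 *		...authorize the request using uid/pid...
 *		ucred_free(uc);
 *	}
 */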
1696 * Bind the current lwp to the server thread pool associated with 'did'
1699 door_bind(int did)
1701 door_node_t *dp;
1702 door_server_t *st;
1704 if ((dp = door_lookup(did, NULL)) == NULL) {
1705 /* Not a door */
1706 return (set_errno(EBADF));
1710 * Can't bind to a non-private door, and can't bind to a door
1711 * served by another process.
1713 if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1714 dp->door_target != curproc) {
1715 releasef(did);
1716 return (set_errno(EINVAL));
1719 st = door_my_server(1);
1720 if (st->d_pool)
1721 door_unbind_thread(st->d_pool);
1722 st->d_pool = dp;
1723 st->d_invbound = 0;
1724 door_bind_thread(dp);
1725 releasef(did);
1727 return (0);
1731 * Unbind the current lwp from its server thread pool
1734 door_unbind(void)
1736 door_server_t *st;
1738 if ((st = door_my_server(0)) == NULL)
1739 return (set_errno(EBADF));
1741 if (st->d_invbound) {
1742 ASSERT(st->d_pool == NULL);
1743 st->d_invbound = 0;
1744 return (0);
1746 if (st->d_pool == NULL)
1747 return (set_errno(EBADF));
1748 door_unbind_thread(st->d_pool);
1749 st->d_pool = NULL;
1750 return (0);
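/*
 * For illustration, one common user-level arrangement for a private pool
 * (names are hypothetical; see door_bind(3C) and door_server_create(3C)):
 * the creation function registered with door_server_create() is invoked
 * when the DOOR_PRIVATE door needs another server thread, and each such
 * thread binds itself to the door and parks in door_return().  A real
 * server also synchronizes create_proc() with the return of
 * door_create(); that is omitted here.
 *
 *	#include <door.h>
 *	#include <thread.h>
 *
 *	static int private_did = -1;
 *
 *	static void *
 *	pool_thread(void *arg)
 *	{
 *		if (door_bind(private_did) == 0)
 *			(void) door_return(NULL, 0, NULL, 0);
 *		return (NULL);
 *	}
 *
 *	static void
 *	create_proc(door_info_t *dip)
 *	{
 *		(void) thr_create(NULL, 0, pool_thread, NULL,
 *		    THR_DETACHED, NULL);
 *	}
 *
 *	(void) door_server_create(create_proc);
 *	private_did = door_create(my_servproc, NULL, DOOR_PRIVATE);
 */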
1754 * Create a descriptor for the associated file and fill in the
1755 * attributes associated with it.
1757 * Return 0 for success, -1 otherwise.
1760 door_insert(struct file *fp, door_desc_t *dp)
1762 struct vnode *vp;
1763 int fd;
1764 door_attr_t attributes = DOOR_DESCRIPTOR;
1766 ASSERT(MUTEX_NOT_HELD(&door_knob));
1767 if ((fd = ufalloc(0)) == -1)
1768 return (-1);
1769 setf(fd, fp);
1770 dp->d_data.d_desc.d_descriptor = fd;
1772 /* Fill in the attributes */
1773 if (fop_realvp(fp->f_vnode, &vp, NULL))
1774 vp = fp->f_vnode;
1775 if (vp && vp->v_type == VDOOR) {
1776 if (VTOD(vp)->door_target == curproc)
1777 attributes |= DOOR_LOCAL;
1778 attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1779 dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1781 dp->d_attributes = attributes;
1782 return (0);
1786 * Return an available thread for this server. A NULL return value indicates
1787 * that either:
1788 * The door has been revoked, or
1789 * a signal was received.
1790 * The two conditions can be differentiated using DOOR_INVALID(dp).
1792 static kthread_t *
1793 door_get_server(door_node_t *dp)
1795 kthread_t **ktp;
1796 kthread_t *server_t;
1797 door_pool_t *pool;
1798 door_server_t *st;
1799 int signalled;
1801 disp_lock_t *tlp;
1802 cpu_t *cp;
1804 ASSERT(MUTEX_HELD(&door_knob));
1806 if (dp->door_flags & DOOR_PRIVATE)
1807 pool = &dp->door_servers;
1808 else
1809 pool = &dp->door_target->p_server_threads;
1811 for (;;) {
1813 * We search the thread pool, looking for a server thread
1814 * ready to take an invocation (i.e. one which is still
1815 * sleeping on a shuttle object). If none are available,
1816 * we sleep on the pool's CV, and will be signaled when a
1817 * thread is added to the pool.
1819 * This relies on the fact that once a thread in the thread
1820 * pool wakes up, it *must* remove and add itself to the pool
1821 * before it can receive door calls.
1823 if (DOOR_INVALID(dp))
1824 return (NULL); /* Target has become invalid */
1826 for (ktp = &pool->dp_threads;
1827 (server_t = *ktp) != NULL;
1828 ktp = &st->d_servers) {
1829 st = DOOR_SERVER(server_t->t_door);
1831 thread_lock(server_t);
1832 if (server_t->t_state == TS_SLEEP &&
1833 SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1834 break;
1835 thread_unlock(server_t);
1837 if (server_t != NULL)
1838 break; /* we've got a live one! */
1840 if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1841 &signalled)) {
1843 * If we were signaled and the door is still
1844 * valid, pass the signal on to another waiter.
1846 if (signalled && !DOOR_INVALID(dp))
1847 cv_signal(&pool->dp_cv);
1848 return (NULL); /* Got a signal */
1853 * We've got a thread_lock()ed thread which is still on the
1854 * shuttle. Take it off the list of available server threads
1855 * and mark it as ONPROC. We are committed to resuming this
1856 * thread now.
1858 tlp = server_t->t_lockp;
1859 cp = CPU;
1861 *ktp = st->d_servers;
1862 st->d_servers = NULL;
1864 * Setting t_disp_queue prevents erroneous preemptions
1865 * if this thread is still in execution on another processor
1867 server_t->t_disp_queue = cp->cpu_disp;
1868 CL_ACTIVE(server_t);
1870 * We are calling thread_onproc() instead of
1871 * THREAD_ONPROC() because compiler can reorder
1872 * the two stores of t_state and t_lockp in
1873 * THREAD_ONPROC().
1875 thread_onproc(server_t, cp);
1876 disp_lock_exit(tlp);
1877 return (server_t);
1881 * Put a server thread back in the pool.
1883 static void
1884 door_release_server(door_node_t *dp, kthread_t *t)
1886 door_server_t *st = DOOR_SERVER(t->t_door);
1887 door_pool_t *pool;
1889 ASSERT(MUTEX_HELD(&door_knob));
1890 st->d_active = NULL;
1891 st->d_caller = NULL;
1892 st->d_layout_done = 0;
1893 if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1894 ASSERT(dp->door_target == NULL ||
1895 dp->door_target == ttoproc(t));
1896 pool = &dp->door_servers;
1897 } else {
1898 pool = &ttoproc(t)->p_server_threads;
1901 st->d_servers = pool->dp_threads;
1902 pool->dp_threads = t;
1904 /* If someone is waiting for a server thread, wake him up */
1905 cv_signal(&pool->dp_cv);
1909 * Remove a server thread from the pool if present.
1911 static void
1912 door_server_exit(proc_t *p, kthread_t *t)
1914 door_pool_t *pool;
1915 kthread_t **next;
1916 door_server_t *st = DOOR_SERVER(t->t_door);
1918 ASSERT(MUTEX_HELD(&door_knob));
1919 if (st->d_pool != NULL) {
1920 ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1921 pool = &st->d_pool->door_servers;
1922 } else {
1923 pool = &p->p_server_threads;
1926 next = &pool->dp_threads;
1927 while (*next != NULL) {
1928 if (*next == t) {
1929 *next = DOOR_SERVER(t->t_door)->d_servers;
1930 return;
1932 next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1937 * Lookup the door descriptor. Caller must call releasef when finished
1938 * with associated door.
1940 static door_node_t *
1941 door_lookup(int did, file_t **fpp)
1943 vnode_t *vp;
1944 file_t *fp;
1946 ASSERT(MUTEX_NOT_HELD(&door_knob));
1947 if ((fp = getf(did)) == NULL)
1948 return (NULL);
1950 * Use the underlying vnode (we may be namefs mounted)
1952 if (fop_realvp(fp->f_vnode, &vp, NULL))
1953 vp = fp->f_vnode;
1955 if (vp == NULL || vp->v_type != VDOOR) {
1956 releasef(did);
1957 return (NULL);
1960 if (fpp)
1961 *fpp = fp;
1963 return (VTOD(vp));
1967 * The current thread is exiting, so clean up any pending
1968 * invocation details
1970 void
1971 door_slam(void)
1973 door_node_t *dp;
1974 door_data_t *dt;
1975 door_client_t *ct;
1976 door_server_t *st;
1979 * If we are an active door server, notify our
1980 * client that we are exiting and revoke our door.
1982 if ((dt = door_my_data(0)) == NULL)
1983 return;
1984 ct = DOOR_CLIENT(dt);
1985 st = DOOR_SERVER(dt);
1987 mutex_enter(&door_knob);
1988 for (;;) {
1989 if (DOOR_T_HELD(ct))
1990 cv_wait(&ct->d_cv, &door_knob);
1991 else if (DOOR_T_HELD(st))
1992 cv_wait(&st->d_cv, &door_knob);
1993 else
1994 break; /* neither flag is set */
1996 curthread->t_door = NULL;
1997 if ((dp = st->d_active) != NULL) {
1998 kthread_t *t = st->d_caller;
1999 proc_t *p = curproc;
2001 /* Revoke our door if the process is exiting */
2002 if (dp->door_target == p && (p->p_flag & SEXITING)) {
2003 door_list_delete(dp);
2004 dp->door_target = NULL;
2005 dp->door_flags |= DOOR_REVOKED;
2006 if (dp->door_flags & DOOR_PRIVATE)
2007 cv_broadcast(&dp->door_servers.dp_cv);
2008 else
2009 cv_broadcast(&p->p_server_threads.dp_cv);
2012 if (t != NULL) {
2014 * Let the caller know we are gone
2016 DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
2017 thread_lock(t);
2018 if (t->t_state == TS_SLEEP &&
2019 SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
2020 setrun_locked(t);
2021 thread_unlock(t);
2024 mutex_exit(&door_knob);
2025 if (st->d_pool)
2026 door_unbind_thread(st->d_pool); /* Implicit door_unbind */
2027 kmem_free(dt, sizeof (door_data_t));
2031 * Set DOOR_REVOKED for all doors of the current process. This is called
2032 * on exit, before the process's lwps are terminated, so that door calls will
2033 * return with an error.
2035 void
2036 door_revoke_all()
2038 door_node_t *dp;
2039 proc_t *p = ttoproc(curthread);
2041 mutex_enter(&door_knob);
2042 for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2043 ASSERT(dp->door_target == p);
2044 dp->door_flags |= DOOR_REVOKED;
2045 if (dp->door_flags & DOOR_PRIVATE)
2046 cv_broadcast(&dp->door_servers.dp_cv);
2048 cv_broadcast(&p->p_server_threads.dp_cv);
2049 mutex_exit(&door_knob);
2053 * The process is exiting, and all doors it created need to be revoked.
2055 void
2056 door_exit(void)
2058 door_node_t *dp;
2059 proc_t *p = ttoproc(curthread);
2061 ASSERT(p->p_lwpcnt == 1);
2063 * Walk the list of active doors created by this process and
2064 * revoke them all.
2066 mutex_enter(&door_knob);
2067 for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2068 dp->door_target = NULL;
2069 dp->door_flags |= DOOR_REVOKED;
2070 if (dp->door_flags & DOOR_PRIVATE)
2071 cv_broadcast(&dp->door_servers.dp_cv);
2073 cv_broadcast(&p->p_server_threads.dp_cv);
2074 /* Clear the list */
2075 p->p_door_list = NULL;
2077 /* Clean up the unref list */
2078 while ((dp = p->p_unref_list) != NULL) {
2079 p->p_unref_list = dp->door_ulist;
2080 dp->door_ulist = NULL;
2081 mutex_exit(&door_knob);
2082 VN_RELE(DTOV(dp));
2083 mutex_enter(&door_knob);
2085 mutex_exit(&door_knob);
2090 * The process is executing forkall(), and we need to flag threads that
2091 * are bound to a door in the child. This will make the child threads
2092 * return an error to door_return unless they call door_unbind first.
2094 void
2095 door_fork(kthread_t *parent, kthread_t *child)
2097 door_data_t *pt = parent->t_door;
2098 door_server_t *st = DOOR_SERVER(pt);
2099 door_data_t *dt;
2101 ASSERT(MUTEX_NOT_HELD(&door_knob));
2102 if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2103 /* parent thread is bound to a door */
2104 dt = child->t_door =
2105 kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2106 DOOR_SERVER(dt)->d_invbound = 1;
2111 * Deliver queued unrefs to appropriate door server.
2113 static int
2114 door_unref(void)
2116 door_node_t *dp;
2117 static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2118 proc_t *p = ttoproc(curthread);
2120 /* make sure there's only one unref thread per process */
2121 mutex_enter(&door_knob);
2122 if (p->p_unref_thread) {
2123 mutex_exit(&door_knob);
2124 return (set_errno(EALREADY));
2126 p->p_unref_thread = 1;
2127 mutex_exit(&door_knob);
2129 (void) door_my_data(1); /* create info, if necessary */
2131 for (;;) {
2132 mutex_enter(&door_knob);
2134 /* Grab a queued request */
2135 while ((dp = p->p_unref_list) == NULL) {
2136 if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2138 * Interrupted.
2139 * Return so we can finish forkall() or exit().
2141 p->p_unref_thread = 0;
2142 mutex_exit(&door_knob);
2143 return (set_errno(EINTR));
2146 p->p_unref_list = dp->door_ulist;
2147 dp->door_ulist = NULL;
2148 dp->door_flags |= DOOR_UNREF_ACTIVE;
2149 mutex_exit(&door_knob);
2151 (void) door_upcall(DTOV(dp), &unref_args, NULL, SIZE_MAX, 0);
2153 if (unref_args.rbuf != 0) {
2154 kmem_free(unref_args.rbuf, unref_args.rsize);
2155 unref_args.rbuf = NULL;
2156 unref_args.rsize = 0;
2159 mutex_enter(&door_knob);
2160 ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2161 dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2162 mutex_exit(&door_knob);
2163 VN_RELE(DTOV(dp));
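/*
 * For illustration, the receiving end of that upcall at user level: the
 * server procedure of a door created with DOOR_UNREF (or DOOR_UNREF_MULTI)
 * is invoked with the distinguished argument DOOR_UNREF_DATA when the last
 * reference to the door goes away.  The cleanup shown is hypothetical.
 *
 *	static void
 *	my_servproc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		if (argp == DOOR_UNREF_DATA) {
 *			...tear down state, perhaps door_revoke()...
 *			(void) door_return(NULL, 0, NULL, 0);
 *		}
 *		...normal request handling...
 *	}
 */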
2169 * Deliver queued unrefs to kernel door server.
2171 /* ARGSUSED */
2172 static void
2173 door_unref_kernel(caddr_t arg)
2175 door_node_t *dp;
2176 static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2177 proc_t *p = ttoproc(curthread);
2178 callb_cpr_t cprinfo;
2180 /* should only be one of these */
2181 mutex_enter(&door_knob);
2182 if (p->p_unref_thread) {
2183 mutex_exit(&door_knob);
2184 return;
2186 p->p_unref_thread = 1;
2187 mutex_exit(&door_knob);
2189 (void) door_my_data(1); /* make sure we have a door_data_t */
2191 CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2192 for (;;) {
2193 mutex_enter(&door_knob);
2194 /* Grab a queued request */
2195 while ((dp = p->p_unref_list) == NULL) {
2196 CALLB_CPR_SAFE_BEGIN(&cprinfo);
2197 cv_wait(&p->p_unref_cv, &door_knob);
2198 CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2200 p->p_unref_list = dp->door_ulist;
2201 dp->door_ulist = NULL;
2202 dp->door_flags |= DOOR_UNREF_ACTIVE;
2203 mutex_exit(&door_knob);
2205 (*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2207 mutex_enter(&door_knob);
2208 ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2209 dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2210 mutex_exit(&door_knob);
2211 VN_RELE(DTOV(dp));
2217 * Queue an unref invocation for processing by the door's server process.
2218 * The door may or may not be revoked at this point.
2220 void
2221 door_deliver_unref(door_node_t *d)
2223 struct proc *server = d->door_target;
2225 ASSERT(MUTEX_HELD(&door_knob));
2226 ASSERT(d->door_active == 0);
2228 if (server == NULL)
2229 return;
2231 * Create an lwp to deliver unref calls if one isn't already running.
2233 * A separate thread is used to deliver unrefs since the current
2234 * thread may be holding resources (e.g. locks) in user land that
2235 * may be needed by the unref processing. This would cause a
2236 * deadlock.
2238 if (d->door_flags & DOOR_UNREF_MULTI) {
2239 /* multiple unrefs */
2240 d->door_flags &= ~DOOR_DELAY;
2241 } else {
2242 /* Only 1 unref per door */
2243 d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2245 mutex_exit(&door_knob);
2248 * Need to bump the vnode count before putting the door on the
2249 * list so it doesn't get prematurely released by door_unref.
2251 VN_HOLD(DTOV(d));
2253 mutex_enter(&door_knob);
2254 /* is this door already on the unref list? */
2255 if (d->door_flags & DOOR_UNREF_MULTI) {
2256 door_node_t *dp;
2257 for (dp = server->p_unref_list; dp != NULL;
2258 dp = dp->door_ulist) {
2259 if (d == dp) {
2260 /* already there, don't need to add another */
2261 mutex_exit(&door_knob);
2262 VN_RELE(DTOV(d));
2263 mutex_enter(&door_knob);
2264 return;
2268 ASSERT(d->door_ulist == NULL);
2269 d->door_ulist = server->p_unref_list;
2270 server->p_unref_list = d;
2271 cv_broadcast(&server->p_unref_cv);
2275 * The caller's buffer isn't big enough for all of the data/fds. Allocate
2276 * space in the caller's address space for the results and copy the data
2277 * there.
2279 * For EOVERFLOW, we must clean up the server's door descriptors.
2281 static int
2282 door_overflow(
2283 kthread_t *caller,
2284 caddr_t data_ptr, /* data location */
2285 size_t data_size, /* data size */
2286 door_desc_t *desc_ptr, /* descriptor location */
2287 uint_t desc_num) /* descriptor count */
2289 proc_t *callerp = ttoproc(caller);
2290 struct as *as = callerp->p_as;
2291 door_client_t *ct = DOOR_CLIENT(caller->t_door);
2292 caddr_t addr; /* Resulting address in target */
2293 size_t rlen; /* Rounded len */
2294 size_t len;
2295 uint_t i;
2296 size_t ds = desc_num * sizeof (door_desc_t);
2298 ASSERT(MUTEX_NOT_HELD(&door_knob));
2299 ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2301 /* Do initial overflow check */
2302 if (!ufcanalloc(callerp, desc_num))
2303 return (EMFILE);
2306 * Allocate space for the data and descriptors in the caller's address space
2308 rlen = roundup(data_size + ds, PAGESIZE);
2309 as_rangelock(as);
2310 map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2311 if (addr == NULL ||
2312 as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2313 /* No virtual memory available, or anon mapping failed */
2314 as_rangeunlock(as);
2315 if (!ct->d_kernel && desc_num > 0) {
2316 int error = door_release_fds(desc_ptr, desc_num);
2317 if (error)
2318 return (error);
2320 return (EOVERFLOW);
2322 as_rangeunlock(as);
2324 if (ct->d_kernel)
2325 goto out;
2327 if (data_size != 0) {
2328 caddr_t src = data_ptr;
2329 caddr_t saddr = addr;
2331 /* Copy any data */
2332 len = data_size;
2333 while (len != 0) {
2334 int amount;
2335 int error;
2337 amount = len > PAGESIZE ? PAGESIZE : len;
2338 if ((error = door_copy(as, src, saddr, amount)) != 0) {
2339 (void) as_unmap(as, addr, rlen);
2340 return (error);
2342 saddr += amount;
2343 src += amount;
2344 len -= amount;
2347 /* Copy any fd's */
2348 if (desc_num != 0) {
2349 door_desc_t *didpp, *start;
2350 struct file **fpp;
2351 int fpp_size;
2353 start = didpp = kmem_alloc(ds, KM_SLEEP);
2354 if (copyin_nowatch(desc_ptr, didpp, ds)) {
2355 kmem_free(start, ds);
2356 (void) as_unmap(as, addr, rlen);
2357 return (EFAULT);
2360 fpp_size = desc_num * sizeof (struct file *);
2361 if (fpp_size > ct->d_fpp_size) {
2362 /* make more space */
2363 if (ct->d_fpp_size)
2364 kmem_free(ct->d_fpp, ct->d_fpp_size);
2365 ct->d_fpp_size = fpp_size;
2366 ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2368 fpp = ct->d_fpp;
2370 for (i = 0; i < desc_num; i++) {
2371 struct file *fp;
2372 int fd = didpp->d_data.d_desc.d_descriptor;
2374 if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2375 (fp = getf(fd)) == NULL) {
2376 /* close translated references */
2377 door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2378 /* close untranslated references */
2379 door_fd_rele(didpp, desc_num - i, 0);
2380 kmem_free(start, ds);
2381 (void) as_unmap(as, addr, rlen);
2382 return (EINVAL);
2384 mutex_enter(&fp->f_tlock);
2385 fp->f_count++;
2386 mutex_exit(&fp->f_tlock);
2388 *fpp = fp;
2389 releasef(fd);
2391 if (didpp->d_attributes & DOOR_RELEASE) {
2392 /* release passed reference */
2393 (void) closeandsetf(fd, NULL);
2396 fpp++; didpp++;
2398 kmem_free(start, ds);
2401 out:
2402 ct->d_overflow = 1;
2403 ct->d_args.rbuf = addr;
2404 ct->d_args.rsize = rlen;
2405 return (0);
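/*
 * Editorial sketch (not part of this file): the client-side consequence of
 * door_overflow().  When a reply does not fit in the supplied rbuf, the
 * kernel maps a new buffer in the caller's address space and rewrites
 * rbuf/rsize; door_call(3C) callers must munmap() it.  Names are
 * illustrative only.
 */
#if 0
#include <door.h>
#include <sys/mman.h>
#include <string.h>

static int
call_with_small_buffer(int did)
{
	char buf[64];
	door_arg_t da;

	(void) memset(&da, 0, sizeof (da));
	da.rbuf = buf;
	da.rsize = sizeof (buf);

	if (door_call(did, &da) < 0)
		return (-1);

	/* ... consume da.data_ptr / da.desc_ptr here ... */

	if (da.rbuf != buf)
		(void) munmap(da.rbuf, da.rsize);
	return (0);
}
#endif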
2409 * Transfer arguments from the client to the server.
2411 static int
2412 door_args(kthread_t *server, int is_private)
2414 door_server_t *st = DOOR_SERVER(server->t_door);
2415 door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2416 uint_t ndid;
2417 size_t dsize;
2418 int error;
2420 ASSERT(DOOR_T_HELD(st));
2421 ASSERT(MUTEX_NOT_HELD(&door_knob));
2423 ndid = ct->d_args.desc_num;
2424 if (ndid > door_max_desc)
2425 return (E2BIG);
2428 * Get the stack layout, and fail now if it won't fit.
2430 error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2431 if (error != 0)
2432 return (error);
2434 dsize = ndid * sizeof (door_desc_t);
2435 if (ct->d_args.data_size != 0) {
2436 if (ct->d_args.data_size <= door_max_arg) {
2438 * Use a 2-copy method for small amounts of data
2440 * Allocate a little more than we need for the
2441 * args, in the hope that the results will fit
2442 * without having to reallocate a buffer
2444 ASSERT(ct->d_buf == NULL);
2445 ct->d_bufsize = roundup(ct->d_args.data_size,
2446 DOOR_ROUND);
2447 ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2448 if (copyin_nowatch(ct->d_args.data_ptr,
2449 ct->d_buf, ct->d_args.data_size) != 0) {
2450 kmem_free(ct->d_buf, ct->d_bufsize);
2451 ct->d_buf = NULL;
2452 ct->d_bufsize = 0;
2453 return (EFAULT);
2455 } else {
2456 struct as *as;
2457 caddr_t src;
2458 caddr_t dest;
2459 size_t len = ct->d_args.data_size;
2460 uintptr_t base;
2463 * Use a 1-copy method
2465 as = ttoproc(server)->p_as;
2466 src = ct->d_args.data_ptr;
2468 dest = st->d_layout.dl_datap;
2469 base = (uintptr_t)dest;
2472 * Copy data directly into server. We proceed
2473 * downward from the top of the stack, to mimic
2474 * normal stack usage. This allows the guard page
2475 * to stop us before we corrupt anything.
2477 while (len != 0) {
2478 uintptr_t start;
2479 uintptr_t end;
2480 uintptr_t offset;
2481 size_t amount;
2484 * Locate the next part to copy.
2486 end = base + len;
2487 start = P2ALIGN(end - 1, PAGESIZE);
2490 * if we are on the final (first) page, fix
2491 * up the start position.
2493 if (P2ALIGN(base, PAGESIZE) == start)
2494 start = base;
2496 offset = start - base; /* the copy offset */
2497 amount = end - start; /* # bytes to copy */
2499 ASSERT(amount > 0 && amount <= len &&
2500 amount <= PAGESIZE);
2502 error = door_copy(as, src + offset,
2503 dest + offset, amount);
2504 if (error != 0)
2505 return (error);
2506 len -= amount;
2511 * Copyin the door args and translate them into files
2513 if (ndid != 0) {
2514 door_desc_t *didpp;
2515 door_desc_t *start;
2516 struct file **fpp;
2518 start = didpp = kmem_alloc(dsize, KM_SLEEP);
2520 if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2521 kmem_free(start, dsize);
2522 return (EFAULT);
2524 ct->d_fpp_size = ndid * sizeof (struct file *);
2525 ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2526 fpp = ct->d_fpp;
2527 while (ndid--) {
2528 struct file *fp;
2529 int fd = didpp->d_data.d_desc.d_descriptor;
2531 /* We only understand file descriptors as passed objs */
2532 if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2533 (fp = getf(fd)) == NULL) {
2534 /* close translated references */
2535 door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2536 /* close untranslated references */
2537 door_fd_rele(didpp, ndid + 1, 0);
2538 kmem_free(start, dsize);
2539 kmem_free(ct->d_fpp, ct->d_fpp_size);
2540 ct->d_fpp = NULL;
2541 ct->d_fpp_size = 0;
2542 return (EINVAL);
2544 /* Hold the fp */
2545 mutex_enter(&fp->f_tlock);
2546 fp->f_count++;
2547 mutex_exit(&fp->f_tlock);
2549 *fpp = fp;
2550 releasef(fd);
2552 if (didpp->d_attributes & DOOR_RELEASE) {
2553 /* release passed reference */
2554 (void) closeandsetf(fd, NULL);
2557 fpp++; didpp++;
2559 kmem_free(start, dsize);
2561 return (0);
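/*
 * Editorial sketch (not part of this file): the client side of the argument
 * transfer above, a door_call(3C) passing a small data buffer plus one file
 * descriptor.  DOOR_RELEASE asks the kernel to close the caller's
 * descriptor once it has been duplicated into the server, matching the
 * DOOR_RELEASE handling in door_args().  Names are illustrative only.
 */
#if 0
#include <door.h>
#include <string.h>

static int
send_fd(int did, int fd, const char *msg, char *rbuf, size_t rsize)
{
	door_arg_t da;
	door_desc_t dd;

	dd.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
	dd.d_data.d_desc.d_descriptor = fd;

	(void) memset(&da, 0, sizeof (da));
	da.data_ptr = (char *)msg;
	da.data_size = strlen(msg) + 1;
	da.desc_ptr = &dd;
	da.desc_num = 1;
	da.rbuf = rbuf;
	da.rsize = rsize;

	return (door_call(did, &da));
}
#endif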
2565 * Transfer arguments from a user client to a kernel server. This copies in
2566 * descriptors and translates them into door handles. It doesn't touch the
2567 * other data, letting the kernel server deal with that (to avoid needing
2568 * to copy the data twice).
2570 static int
2571 door_translate_in(void)
2573 door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2574 uint_t ndid;
2576 ASSERT(MUTEX_NOT_HELD(&door_knob));
2577 ndid = ct->d_args.desc_num;
2578 if (ndid > door_max_desc)
2579 return (E2BIG);
2581 * Copyin the door args and translate them into door handles.
2583 if (ndid != 0) {
2584 door_desc_t *didpp;
2585 door_desc_t *start;
2586 size_t dsize = ndid * sizeof (door_desc_t);
2587 struct file *fp;
2589 start = didpp = kmem_alloc(dsize, KM_SLEEP);
2591 if (copyin_nowatch(ct->d_args.desc_ptr, didpp, dsize)) {
2592 kmem_free(start, dsize);
2593 return (EFAULT);
2595 while (ndid--) {
2596 vnode_t *vp;
2597 int fd = didpp->d_data.d_desc.d_descriptor;
2600 * We only understand file descriptors as passed objs
2602 if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2603 (fp = getf(fd)) != NULL) {
2604 didpp->d_data.d_handle = FTODH(fp);
2605 /* Hold the door */
2606 door_ki_hold(didpp->d_data.d_handle);
2608 releasef(fd);
2610 if (didpp->d_attributes & DOOR_RELEASE) {
2611 /* release passed reference */
2612 (void) closeandsetf(fd, NULL);
2615 if (fop_realvp(fp->f_vnode, &vp, NULL))
2616 vp = fp->f_vnode;
2618 /* Set attributes */
2619 didpp->d_attributes = DOOR_HANDLE |
2620 (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2621 } else {
2622 /* close translated references */
2623 door_fd_close(start, didpp - start);
2624 /* close untranslated references */
2625 door_fd_rele(didpp, ndid + 1, 0);
2626 kmem_free(start, dsize);
2627 return (EINVAL);
2629 didpp++;
2631 ct->d_args.desc_ptr = start;
2633 return (0);
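/*
 * Editorial sketch (not part of this file): what door_translate_in() means
 * for a kernel door server.  Descriptors passed by a user client arrive as
 * DOOR_HANDLE entries whose holds the server must either keep deliberately
 * or release.  The helper name is illustrative only.
 */
#if 0
static void
release_passed_handles(door_arg_t *args)
{
	uint_t i;

	for (i = 0; i < args->desc_num; i++) {
		door_desc_t *dd = &args->desc_ptr[i];

		if (dd->d_attributes & DOOR_HANDLE)
			door_ki_rele(dd->d_data.d_handle);
	}
}
#endif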
2637 * Translate door arguments from kernel to user. This copies the passed
2638 * door handles. It doesn't touch other data. It is used by door_upcall,
2639 * and for data returned by a door_call to a kernel server.
2641 static int
2642 door_translate_out(void)
2644 door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2645 uint_t ndid;
2647 ASSERT(MUTEX_NOT_HELD(&door_knob));
2648 ndid = ct->d_args.desc_num;
2649 if (ndid > door_max_desc) {
2650 door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2651 return (E2BIG);
2654 * Translate the door args into files
2656 if (ndid != 0) {
2657 door_desc_t *didpp = ct->d_args.desc_ptr;
2658 struct file **fpp;
2660 ct->d_fpp_size = ndid * sizeof (struct file *);
2661 fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2662 while (ndid--) {
2663 struct file *fp = NULL;
2664 int fd = -1;
2667 * We understand file descriptors and door
2668 * handles as passed objs.
2670 if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2671 fd = didpp->d_data.d_desc.d_descriptor;
2672 fp = getf(fd);
2673 } else if (didpp->d_attributes & DOOR_HANDLE)
2674 fp = DHTOF(didpp->d_data.d_handle);
2675 if (fp != NULL) {
2676 /* Hold the fp */
2677 mutex_enter(&fp->f_tlock);
2678 fp->f_count++;
2679 mutex_exit(&fp->f_tlock);
2681 *fpp = fp;
2682 if (didpp->d_attributes & DOOR_DESCRIPTOR)
2683 releasef(fd);
2684 if (didpp->d_attributes & DOOR_RELEASE) {
2685 /* release passed reference */
2686 if (fd >= 0)
2687 (void) closeandsetf(fd, NULL);
2688 else
2689 (void) closef(fp);
2691 } else {
2692 /* close translated references */
2693 door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2694 /* close untranslated references */
2695 door_fd_rele(didpp, ndid + 1, 1);
2696 kmem_free(ct->d_fpp, ct->d_fpp_size);
2697 ct->d_fpp = NULL;
2698 ct->d_fpp_size = 0;
2699 return (EINVAL);
2701 fpp++; didpp++;
2704 return (0);
2708 * Move the results from the server to the client
2710 static int
2711 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2712 door_desc_t *desc_ptr, uint_t desc_num)
2714 door_client_t *ct = DOOR_CLIENT(caller->t_door);
2715 door_upcall_t *dup = ct->d_upcall;
2716 size_t dsize;
2717 size_t rlen;
2718 size_t result_size;
2720 ASSERT(DOOR_T_HELD(ct));
2721 ASSERT(MUTEX_NOT_HELD(&door_knob));
2723 if (ct->d_noresults)
2724 return (E2BIG); /* No results expected */
2726 if (desc_num > door_max_desc)
2727 return (E2BIG); /* Too many descriptors */
2729 dsize = desc_num * sizeof (door_desc_t);
2731 * Check if the results are bigger than the client's buffer
2733 if (dsize)
2734 rlen = roundup(data_size, sizeof (door_desc_t));
2735 else
2736 rlen = data_size;
2737 if ((result_size = rlen + dsize) == 0)
2738 return (0);
2740 if (dup != NULL) {
2741 if (desc_num > dup->du_max_descs)
2742 return (EMFILE);
2744 if (data_size > dup->du_max_data)
2745 return (E2BIG);
2748 * Handle upcalls
2750 if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2752 * If there's no return buffer or the buffer is too
2753 * small, allocate a new one. The old buffer (if it
2754 * exists) will be freed by the upcall client.
2756 if (result_size > door_max_upcall_reply)
2757 return (E2BIG);
2758 ct->d_args.rsize = result_size;
2759 ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2761 ct->d_args.data_ptr = ct->d_args.rbuf;
2762 if (data_size != 0 &&
2763 copyin_nowatch(data_ptr, ct->d_args.data_ptr,
2764 data_size) != 0)
2765 return (EFAULT);
2766 } else if (result_size > ct->d_args.rsize) {
2767 return (door_overflow(caller, data_ptr, data_size,
2768 desc_ptr, desc_num));
2769 } else if (data_size != 0) {
2770 if (data_size <= door_max_arg) {
2772 * Use a 2-copy method for small amounts of data
2774 if (ct->d_buf == NULL) {
2775 ct->d_bufsize = data_size;
2776 ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2777 } else if (ct->d_bufsize < data_size) {
2778 kmem_free(ct->d_buf, ct->d_bufsize);
2779 ct->d_bufsize = data_size;
2780 ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2782 if (copyin_nowatch(data_ptr, ct->d_buf, data_size) != 0)
2783 return (EFAULT);
2784 } else {
2785 struct as *as = ttoproc(caller)->p_as;
2786 caddr_t dest = ct->d_args.rbuf;
2787 caddr_t src = data_ptr;
2788 size_t len = data_size;
2790 /* Copy data directly into client */
2791 while (len != 0) {
2792 uint_t amount;
2793 uint_t max;
2794 uint_t off;
2795 int error;
2797 off = (uintptr_t)dest & PAGEOFFSET;
2798 if (off)
2799 max = PAGESIZE - off;
2800 else
2801 max = PAGESIZE;
2802 amount = len > max ? max : len;
2803 error = door_copy(as, src, dest, amount);
2804 if (error != 0)
2805 return (error);
2806 dest += amount;
2807 src += amount;
2808 len -= amount;
2814 * Copyin the returned descriptors and translate them into held file pointers
2816 if (desc_num != 0) {
2817 door_desc_t *start;
2818 door_desc_t *didpp;
2819 struct file **fpp;
2820 size_t fpp_size;
2821 uint_t i;
2823 /* First, check if we would overflow client */
2824 if (!ufcanalloc(ttoproc(caller), desc_num))
2825 return (EMFILE);
2827 start = didpp = kmem_alloc(dsize, KM_SLEEP);
2828 if (copyin_nowatch(desc_ptr, didpp, dsize)) {
2829 kmem_free(start, dsize);
2830 return (EFAULT);
2832 fpp_size = desc_num * sizeof (struct file *);
2833 if (fpp_size > ct->d_fpp_size) {
2834 /* make more space */
2835 if (ct->d_fpp_size)
2836 kmem_free(ct->d_fpp, ct->d_fpp_size);
2837 ct->d_fpp_size = fpp_size;
2838 ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2840 fpp = ct->d_fpp;
2842 for (i = 0; i < desc_num; i++) {
2843 struct file *fp;
2844 int fd = didpp->d_data.d_desc.d_descriptor;
2846 /* Only understand file descriptor results */
2847 if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2848 (fp = getf(fd)) == NULL) {
2849 /* close translated references */
2850 door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2851 /* close untranslated references */
2852 door_fd_rele(didpp, desc_num - i, 0);
2853 kmem_free(start, dsize);
2854 return (EINVAL);
2857 mutex_enter(&fp->f_tlock);
2858 fp->f_count++;
2859 mutex_exit(&fp->f_tlock);
2861 *fpp = fp;
2862 releasef(fd);
2864 if (didpp->d_attributes & DOOR_RELEASE) {
2865 /* release passed reference */
2866 (void) closeandsetf(fd, NULL);
2869 fpp++; didpp++;
2871 kmem_free(start, dsize);
2873 return (0);
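/*
 * Editorial sketch (not part of this file): the server side of the result
 * transfer above.  A user-level service procedure replies with a short
 * string and an open descriptor; door_results() copies both back into the
 * client.  The path name is illustrative only.
 */
#if 0
#include <door.h>
#include <fcntl.h>

static void
server_proc(void *cookie, char *argp, size_t arg_size,
    door_desc_t *dp, uint_t n_desc)
{
	static char reply[] = "ok";
	door_desc_t dd;

	dd.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
	dd.d_data.d_desc.d_descriptor = open("/dev/null", O_RDONLY);

	(void) door_return(reply, sizeof (reply), &dd, 1);
}
#endif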
2877 * Close all the descriptors.
2879 static void
2880 door_fd_close(door_desc_t *d, uint_t n)
2882 uint_t i;
2884 ASSERT(MUTEX_NOT_HELD(&door_knob));
2885 for (i = 0; i < n; i++) {
2886 if (d->d_attributes & DOOR_DESCRIPTOR) {
2887 (void) closeandsetf(
2888 d->d_data.d_desc.d_descriptor, NULL);
2889 } else if (d->d_attributes & DOOR_HANDLE) {
2890 door_ki_rele(d->d_data.d_handle);
2892 d++;
2897 * Close descriptors that have the DOOR_RELEASE attribute set.
2899 void
2900 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2902 uint_t i;
2904 ASSERT(MUTEX_NOT_HELD(&door_knob));
2905 for (i = 0; i < n; i++) {
2906 if (d->d_attributes & DOOR_RELEASE) {
2907 if (d->d_attributes & DOOR_DESCRIPTOR) {
2908 (void) closeandsetf(
2909 d->d_data.d_desc.d_descriptor, NULL);
2910 } else if (from_kernel &&
2911 (d->d_attributes & DOOR_HANDLE)) {
2912 door_ki_rele(d->d_data.d_handle);
2915 d++;
2920 * Copy descriptors into the kernel so we can release any marked
2921 * DOOR_RELEASE.
2924 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2926 size_t dsize;
2927 door_desc_t *didpp;
2928 uint_t desc_num;
2930 ASSERT(MUTEX_NOT_HELD(&door_knob));
2931 ASSERT(ndesc != 0);
2933 desc_num = MIN(ndesc, door_max_desc);
2935 dsize = desc_num * sizeof (door_desc_t);
2936 didpp = kmem_alloc(dsize, KM_SLEEP);
2938 while (ndesc > 0) {
2939 uint_t count = MIN(ndesc, desc_num);
2941 if (copyin_nowatch(desc_ptr, didpp,
2942 count * sizeof (door_desc_t))) {
2943 kmem_free(didpp, dsize);
2944 return (EFAULT);
2946 door_fd_rele(didpp, count, 0);
2948 ndesc -= count;
2949 desc_ptr += count;
2951 kmem_free(didpp, dsize);
2952 return (0);
2956 * Decrement ref count on all the files passed
2958 static void
2959 door_fp_close(struct file **fp, uint_t n)
2961 uint_t i;
2963 ASSERT(MUTEX_NOT_HELD(&door_knob));
2965 for (i = 0; i < n; i++)
2966 (void) closef(fp[i]);
2970 * Copy data from 'src' in the current address space to 'dest' in 'as' for 'len'
2971 * bytes.
2973 * Performs this using 1 mapin and 1 copy operation.
2975 * We really should do more than 1 page at a time to improve
2976 * performance, but for now this is treated as an anomalous condition.
2978 static int
2979 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2981 caddr_t kaddr;
2982 caddr_t rdest;
2983 uint_t off;
2984 page_t **pplist;
2985 page_t *pp = NULL;
2986 int error = 0;
2988 ASSERT(len <= PAGESIZE);
2989 off = (uintptr_t)dest & PAGEOFFSET; /* offset within the page */
2990 rdest = (caddr_t)((uintptr_t)dest &
2991 (uintptr_t)PAGEMASK); /* Page boundary */
2992 ASSERT(off + len <= PAGESIZE);
2995 * Lock down destination page.
2997 if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
2998 return (E2BIG);
3000 * Check if we have a shadow page list from as_pagelock. If not,
3001 * we took the slow path and have to find our page struct the hard
3002 * way.
3004 if (pplist == NULL) {
3005 pfn_t pfnum;
3007 /* MMU mapping is already locked down */
3008 AS_LOCK_ENTER(as, RW_READER);
3009 pfnum = hat_getpfnum(as->a_hat, rdest);
3010 AS_LOCK_EXIT(as);
3013 * TODO: The pfn step should not be necessary - need
3014 * a hat_getpp() function.
3016 if (pf_is_memory(pfnum)) {
3017 pp = page_numtopp_nolock(pfnum);
3018 ASSERT(pp == NULL || PAGE_LOCKED(pp));
3019 } else
3020 pp = NULL;
3021 if (pp == NULL) {
3022 as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3023 return (E2BIG);
3025 } else {
3026 pp = *pplist;
3029 * Map destination page into kernel address
3031 if (kpm_enable)
3032 kaddr = (caddr_t)hat_kpm_mapin(pp, NULL);
3033 else
3034 kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE,
3035 (caddr_t)-1);
3038 * Copy from src to dest
3040 if (copyin_nowatch(src, kaddr + off, len) != 0)
3041 error = EFAULT;
3043 * Unmap destination page from kernel
3045 if (kpm_enable)
3046 hat_kpm_mapout(pp, NULL, kaddr);
3047 else
3048 ppmapout(kaddr);
3050 * Unlock destination page
3052 as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3053 return (error);
3057 * General kernel upcall using doors
3058 * Returns 0 on success, errno for failures.
3059 * Caller must have a hold on the door based vnode, and on any
3060 * references passed in desc_ptr. The references are released
3061 * in the event of an error, and passed without duplication
3062 * otherwise. Note that param->rbuf must be 64-bit aligned in
3063 * a 64-bit kernel, since it may be used to store door descriptors
3064 * if they are returned by the server. The caller is responsible
3065 * for holding a reference to the cred passed in.
3068 door_upcall(vnode_t *vp, door_arg_t *param, struct cred *cred,
3069 size_t max_data, uint_t max_descs)
3071 /* Locals */
3072 door_upcall_t *dup;
3073 door_node_t *dp;
3074 kthread_t *server_thread;
3075 int error = 0;
3076 klwp_t *lwp;
3077 door_client_t *ct; /* curthread door_data */
3078 door_server_t *st; /* server thread door_data */
3079 int gotresults = 0;
3080 int cancel_pending;
3082 if (vp->v_type != VDOOR) {
3083 if (param->desc_num)
3084 door_fd_rele(param->desc_ptr, param->desc_num, 1);
3085 return (EINVAL);
3088 lwp = ttolwp(curthread);
3089 ct = door_my_client(1);
3090 dp = VTOD(vp); /* Convert to a door_node_t */
3092 dup = kmem_zalloc(sizeof (*dup), KM_SLEEP);
3093 dup->du_cred = (cred != NULL) ? cred : curthread->t_cred;
3094 dup->du_max_data = max_data;
3095 dup->du_max_descs = max_descs;
3098 * This should be done in shuttle_resume(), just before going to
3099 * sleep, but we want to avoid overhead while holding door_knob.
3100 * prstop() is just a no-op if we don't really go to sleep.
3101 * We test not-kernel-address-space for the sake of clustering code.
3103 if (lwp && lwp->lwp_nostop == 0 && curproc->p_as != &kas)
3104 prstop(PR_REQUESTED, 0);
3106 mutex_enter(&door_knob);
3107 if (DOOR_INVALID(dp)) {
3108 mutex_exit(&door_knob);
3109 if (param->desc_num)
3110 door_fd_rele(param->desc_ptr, param->desc_num, 1);
3111 error = EBADF;
3112 goto out;
3115 if (dp->door_target == &p0) {
3116 /* Can't do an upcall to a kernel server */
3117 mutex_exit(&door_knob);
3118 if (param->desc_num)
3119 door_fd_rele(param->desc_ptr, param->desc_num, 1);
3120 error = EINVAL;
3121 goto out;
3124 error = door_check_limits(dp, param, 1);
3125 if (error != 0) {
3126 mutex_exit(&door_knob);
3127 if (param->desc_num)
3128 door_fd_rele(param->desc_ptr, param->desc_num, 1);
3129 goto out;
3133 * Get a server thread from the target domain
3135 if ((server_thread = door_get_server(dp)) == NULL) {
3136 if (DOOR_INVALID(dp))
3137 error = EBADF;
3138 else
3139 error = EAGAIN;
3140 mutex_exit(&door_knob);
3141 if (param->desc_num)
3142 door_fd_rele(param->desc_ptr, param->desc_num, 1);
3143 goto out;
3146 st = DOOR_SERVER(server_thread->t_door);
3147 ct->d_buf = param->data_ptr;
3148 ct->d_bufsize = param->data_size;
3149 ct->d_args = *param; /* structure assignment */
3151 if (ct->d_args.desc_num) {
3153 * Translate the descriptors being passed to the server
3155 DOOR_T_HOLD(st);
3156 mutex_exit(&door_knob);
3157 error = door_translate_out();
3158 mutex_enter(&door_knob);
3159 DOOR_T_RELEASE(st);
3160 if (error) {
3162 * We're not going to resume this thread after all
3164 door_release_server(dp, server_thread);
3165 shuttle_sleep(server_thread);
3166 mutex_exit(&door_knob);
3167 goto out;
3171 ct->d_upcall = dup;
3172 if (param->rsize == 0)
3173 ct->d_noresults = 1;
3174 else
3175 ct->d_noresults = 0;
3177 dp->door_active++;
3179 ct->d_error = DOOR_WAIT;
3180 st->d_caller = curthread;
3181 st->d_active = dp;
3183 shuttle_resume(server_thread, &door_knob);
3185 mutex_enter(&door_knob);
3186 shuttle_return:
3187 if ((error = ct->d_error) < 0) { /* DOOR_WAIT or DOOR_EXIT */
3189 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3191 mutex_exit(&door_knob); /* May block in ISSIG */
3192 cancel_pending = 0;
3193 if (lwp && (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
3194 MUSTRETURN(curproc, curthread) ||
3195 (cancel_pending = schedctl_cancel_pending()) != 0)) {
3196 /* Signal, forkall, ... */
3197 if (cancel_pending)
3198 schedctl_cancel_eintr();
3199 lwp->lwp_sysabort = 0;
3200 mutex_enter(&door_knob);
3201 error = EINTR;
3203 * If the server has finished processing our call,
3204 * or exited (calling door_slam()), then d_error
3205 * will have changed. If the server hasn't finished
3206 * yet, d_error will still be DOOR_WAIT, and we
3207 * let it know we are not interested in any
3208 * results by sending a SIGCANCEL, unless the door
3209 * is marked with DOOR_NO_CANCEL.
3211 if (ct->d_error == DOOR_WAIT &&
3212 st->d_caller == curthread) {
3213 proc_t *p = ttoproc(server_thread);
3215 st->d_active = NULL;
3216 st->d_caller = NULL;
3217 if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3218 DOOR_T_HOLD(st);
3219 mutex_exit(&door_knob);
3221 mutex_enter(&p->p_lock);
3222 sigtoproc(p, server_thread, SIGCANCEL);
3223 mutex_exit(&p->p_lock);
3225 mutex_enter(&door_knob);
3226 DOOR_T_RELEASE(st);
3229 } else {
3231 * Return from stop(), server exit...
3233 * Note that the server could have done a
3234 * door_return while the client was in stop state
3235 * (ISSIG), in which case the error condition
3236 * is updated by the server.
3238 mutex_enter(&door_knob);
3239 if (ct->d_error == DOOR_WAIT) {
3240 /* Still waiting for a reply */
3241 shuttle_swtch(&door_knob);
3242 mutex_enter(&door_knob);
3243 if (lwp)
3244 lwp->lwp_asleep = 0;
3245 goto shuttle_return;
3246 } else if (ct->d_error == DOOR_EXIT) {
3247 /* Server exit */
3248 error = EINTR;
3249 } else {
3250 /* Server did a door_return during ISSIG */
3251 error = ct->d_error;
3255 * Can't exit if the server is currently copying
3256 * results for me
3258 while (DOOR_T_HELD(ct))
3259 cv_wait(&ct->d_cv, &door_knob);
3262 * Find out if results were successfully copied.
3264 if (ct->d_error == 0)
3265 gotresults = 1;
3267 if (lwp) {
3268 lwp->lwp_asleep = 0; /* /proc */
3269 lwp->lwp_sysabort = 0; /* /proc */
3271 if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3272 door_deliver_unref(dp);
3273 mutex_exit(&door_knob);
3276 * Translate returned doors (if any)
3279 if (ct->d_noresults)
3280 goto out;
3282 if (error) {
3284 * If server returned results successfully, then we've
3285 * been interrupted and may need to clean up.
3287 if (gotresults) {
3288 ASSERT(error == EINTR);
3289 door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3291 goto out;
3294 if (ct->d_args.desc_num) {
3295 struct file **fpp;
3296 door_desc_t *didpp;
3297 vnode_t *vp;
3298 uint_t n = ct->d_args.desc_num;
3300 didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3301 roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3302 fpp = ct->d_fpp;
3304 while (n--) {
3305 struct file *fp;
3307 fp = *fpp;
3308 if (fop_realvp(fp->f_vnode, &vp, NULL))
3309 vp = fp->f_vnode;
3311 didpp->d_attributes = DOOR_HANDLE |
3312 (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3313 didpp->d_data.d_handle = FTODH(fp);
3315 fpp++; didpp++;
3319 /* on return data is in rbuf */
3320 *param = ct->d_args; /* structure assignment */
3322 out:
3323 kmem_free(dup, sizeof (*dup));
3325 if (ct->d_fpp) {
3326 kmem_free(ct->d_fpp, ct->d_fpp_size);
3327 ct->d_fpp = NULL;
3328 ct->d_fpp_size = 0;
3331 ct->d_upcall = NULL;
3332 ct->d_noresults = 0;
3333 ct->d_buf = NULL;
3334 ct->d_bufsize = 0;
3335 return (error);
3339 * Add a door to the per-process list of active doors for which the
3340 * process is a server.
3342 static void
3343 door_list_insert(door_node_t *dp)
3345 proc_t *p = dp->door_target;
3347 ASSERT(MUTEX_HELD(&door_knob));
3348 dp->door_list = p->p_door_list;
3349 p->p_door_list = dp;
3353 * Remove a door from the per-process list of active doors.
3355 void
3356 door_list_delete(door_node_t *dp)
3358 door_node_t **pp;
3360 ASSERT(MUTEX_HELD(&door_knob));
3362 * Find the door in the list. If the door belongs to another process,
3363 * it's OK to use p_door_list since that process can't exit until all
3364 * doors have been taken off the list (see door_exit).
3366 pp = &(dp->door_target->p_door_list);
3367 while (*pp != dp)
3368 pp = &((*pp)->door_list);
3370 /* found it, take it off the list */
3371 *pp = dp->door_list;
3376 * External kernel interfaces for doors. These functions are available
3377 * outside the doorfs module for use in creating and using doors from
3378 * within the kernel.
3382 * door_ki_upcall invokes a user-level door server from the kernel, with
3383 * the credentials associated with curthread.
3386 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3388 return (door_ki_upcall_limited(dh, param, NULL, SIZE_MAX, UINT_MAX));
3392 * door_ki_upcall_limited invokes a user-level door server from the
3393 * kernel with the given credentials and reply limits. If the "cred"
3394 * argument is NULL, it uses the credentials associated with the current
3395 * thread. max_data limits the maximum length of the returned data (the
3396 * client will get E2BIG if they go over), and max_desc limits the
3397 * number of returned descriptors (the client will get EMFILE if they
3398 * go over).
3401 door_ki_upcall_limited(door_handle_t dh, door_arg_t *param, struct cred *cred,
3402 size_t max_data, uint_t max_desc)
3404 file_t *fp = DHTOF(dh);
3405 vnode_t *realvp;
3407 if (fop_realvp(fp->f_vnode, &realvp, NULL))
3408 realvp = fp->f_vnode;
3409 return (door_upcall(realvp, param, cred, max_data, max_desc));
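/*
 * Editorial sketch (not part of this file): a kernel subsystem making an
 * upcall to a user-level door server through the interfaces above.  The
 * path, payload, and limits are illustrative only.  When a reply buffer is
 * supplied, remember that the kernel may substitute a larger kmem buffer
 * (see door_results()), which the caller must then free.
 */
#if 0
static int
example_upcall(void)
{
	door_handle_t dh;
	door_arg_t da;
	char req[] = "ping";
	char reply[128];
	int error;

	if ((error = door_ki_open("/var/run/example_door", &dh)) != 0)
		return (error);

	bzero(&da, sizeof (da));
	da.data_ptr = req;
	da.data_size = sizeof (req);
	da.rbuf = reply;
	da.rsize = sizeof (reply);

	error = door_ki_upcall_limited(dh, &da, NULL, sizeof (reply), 0);

	if (error == 0 && da.rbuf != reply) {
		/* kernel allocated a larger reply buffer; release it */
		kmem_free(da.rbuf, da.rsize);
	}

	door_ki_rele(dh);
	return (error);
}
#endif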
3413 * Function call to create a "kernel" door server. A kernel door
3414 * server provides a way for a user-level process to invoke a function
3415 * in the kernel through a door_call. From the caller's point of
3416 * view, a kernel door server looks the same as a user-level one
3417 * (except the server pid is 0). Unlike normal door calls, the
3418 * kernel door function is invoked via a normal function call in the
3419 * same thread and context as the caller.
3422 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3423 door_handle_t *dhp)
3425 int err;
3426 file_t *fp;
3428 /* no DOOR_PRIVATE */
3429 if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3430 (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3431 (DOOR_UNREF | DOOR_UNREF_MULTI))
3432 return (EINVAL);
3434 err = door_create_common(pc_cookie, data_cookie, attributes,
3435 1, NULL, &fp);
3436 if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3437 p0.p_unref_thread == 0) {
3438 /* need to create unref thread for process 0 */
3439 (void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3440 TS_RUN, minclsyspri);
3442 if (err == 0) {
3443 *dhp = FTODH(fp);
3445 return (err);
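/*
 * Editorial sketch (not part of this file): creating a kernel door with
 * door_ki_create().  The service routine's signature below merely mirrors
 * the five-argument invocation used for unref delivery in
 * door_unref_kernel() above and is an assumption, not a documented
 * contract; the reply convention (describing the reply in the door_arg_t
 * before returning) is likewise assumed.  Names are illustrative only.
 */
#if 0
static void
example_kdoor_service(void *cookie, door_arg_t *args, void *a3, void *a4,
    void *a5)
{
	/* inspect args->data_ptr / args->data_size here ... */

	/* assumed reply convention: describe the (empty) reply in place */
	args->data_ptr = NULL;
	args->data_size = 0;
	args->desc_ptr = NULL;
	args->desc_num = 0;
}

static door_handle_t example_dh;

static int
example_kdoor_init(void)
{
	return (door_ki_create((void (*)())example_kdoor_service, NULL, 0,
	    &example_dh));
}
#endif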
3448 void
3449 door_ki_hold(door_handle_t dh)
3451 file_t *fp = DHTOF(dh);
3453 mutex_enter(&fp->f_tlock);
3454 fp->f_count++;
3455 mutex_exit(&fp->f_tlock);
3458 void
3459 door_ki_rele(door_handle_t dh)
3461 file_t *fp = DHTOF(dh);
3463 (void) closef(fp);
3467 door_ki_open(char *pathname, door_handle_t *dhp)
3469 file_t *fp;
3470 vnode_t *vp;
3471 int err;
3473 if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3474 return (err);
3475 if (err = fop_open(&vp, FREAD, kcred, NULL)) {
3476 VN_RELE(vp);
3477 return (err);
3479 if (vp->v_type != VDOOR) {
3480 VN_RELE(vp);
3481 return (EINVAL);
3483 if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3484 VN_RELE(vp);
3485 return (err);
3487 /* falloc returns with f_tlock held on success */
3488 mutex_exit(&fp->f_tlock);
3489 *dhp = FTODH(fp);
3490 return (0);
3494 door_ki_info(door_handle_t dh, struct door_info *dip)
3496 file_t *fp = DHTOF(dh);
3497 vnode_t *vp;
3499 if (fop_realvp(fp->f_vnode, &vp, NULL))
3500 vp = fp->f_vnode;
3501 if (vp->v_type != VDOOR)
3502 return (EINVAL);
3503 door_info_common(VTOD(vp), dip, fp);
3504 return (0);
3507 door_handle_t
3508 door_ki_lookup(int did)
3510 file_t *fp;
3511 door_handle_t dh;
3513 /* is the descriptor really a door? */
3514 if (door_lookup(did, &fp) == NULL)
3515 return (NULL);
3516 /* got the door, put a hold on it and release the fd */
3517 dh = FTODH(fp);
3518 door_ki_hold(dh);
3519 releasef(did);
3520 return (dh);
3524 door_ki_setparam(door_handle_t dh, int type, size_t val)
3526 file_t *fp = DHTOF(dh);
3527 vnode_t *vp;
3529 if (fop_realvp(fp->f_vnode, &vp, NULL))
3530 vp = fp->f_vnode;
3531 if (vp->v_type != VDOOR)
3532 return (EINVAL);
3533 return (door_setparam_common(VTOD(vp), 1, type, val));
3537 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3539 file_t *fp = DHTOF(dh);
3540 vnode_t *vp;
3542 if (fop_realvp(fp->f_vnode, &vp, NULL))
3543 vp = fp->f_vnode;
3544 if (vp->v_type != VDOOR)
3545 return (EINVAL);
3546 return (door_getparam_common(VTOD(vp), type, out));