/* usr/src/uts/common/fs/fs_subr.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*   All Rights Reserved */

/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */
/*
 * Generic vnode operations.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/statvfs.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/unistd.h>
#include <sys/cred.h>
#include <sys/poll.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/stream.h>
#include <fs/fs_subr.h>
#include <fs/fs_reparse.h>
#include <sys/door.h>
#include <sys/acl.h>
#include <sys/share.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/nbmlock.h>
#include <acl/acl_common.h>
#include <sys/pathname.h>
static callb_cpr_t *frlock_serialize_blocked(flk_cb_when_t, void *);

/*
 * Tunable to limit the number of retries used to recover from an ESTALE
 * error.
 */
int fs_estale_retry = 5;

/*
 * Support for the reparse point door upcall.
 */
static door_handle_t reparsed_door;
static kmutex_t reparsed_door_lock;
/*
 * The associated operation is not supported by the file system.
 */
int
fs_nosys()
{
        return (ENOSYS);
}

/*
 * The associated operation is invalid (on this vnode).
 */
int
fs_inval()
{
        return (EINVAL);
}

/*
 * The associated operation is valid only for directories.
 */
int
fs_notdir()
{
        return (ENOTDIR);
}

/*
 * Free the file system specific resources.  For file systems that do not
 * support forced unmount, this is a no-op.
 */
/*ARGSUSED*/
void
fs_freevfs(vfs_t *vfsp)
{
}
/* ARGSUSED */
int
fs_nosys_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
        return (ENOSYS);
}

/* ARGSUSED */
int
fs_nosys_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
    caller_context_t *ct)
{
        return (ENOSYS);
}

/* ARGSUSED */
int
fs_nosys_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
        return (ENOSYS);
}
/*
 * The file system has nothing to sync to disk.  However, the
 * VFS_SYNC operation must not fail.
 */
/* ARGSUSED */
int
fs_sync(struct vfs *vfspp, short flag, cred_t *cr)
{
        return (0);
}

/*
 * Does nothing, but VOP_FSYNC must not fail.
 */
/* ARGSUSED */
int
fs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
        return (0);
}

/*
 * Does nothing, but VOP_PUTPAGE must not fail.
 */
/* ARGSUSED */
int
fs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
    caller_context_t *ctp)
{
        return (0);
}

/*
 * Does nothing, but VOP_IOCTL must not fail.
 */
/* ARGSUSED */
int
fs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
    int *rvalp)
{
        return (0);
}
/*
 * Read/write lock/unlock.  Does nothing.
 */
/* ARGSUSED */
int
fs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
        return (-1);
}

/* ARGSUSED */
void
fs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
{
}

/*
 * Compare two vnodes.
 */
/*ARGSUSED2*/
int
fs_cmp(vnode_t *vp1, vnode_t *vp2, caller_context_t *ct)
{
        return (vp1 == vp2);
}

/*
 * No-op seek operation; only validates the requested offset.
 */
/* ARGSUSED */
int
fs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
        return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
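
/*
 * Illustrative sketch (hypothetical "examplefs" names; not part of this
 * file): file systems commonly plug the generic routines above straight
 * into their vnodeops template, assuming the usual fs_operation_def_t /
 * vn_make_ops() registration conventions.
 */
#if 0
static vnodeops_t *examplefs_vnodeops;

static const fs_operation_def_t examplefs_vnodeops_template[] = {
        VOPNAME_RWLOCK,         { .vop_rwlock = fs_rwlock },
        VOPNAME_RWUNLOCK,       { .vop_rwunlock = fs_rwunlock },
        VOPNAME_SEEK,           { .vop_seek = fs_seek },
        VOPNAME_POLL,           { .vop_poll = fs_poll },
        VOPNAME_PATHCONF,       { .vop_pathconf = fs_pathconf },
        NULL,                   NULL
};

static int
examplefs_init_ops(void)
{
        /* Build the vnodeops structure from the template at module init. */
        return (vn_make_ops("examplefs", examplefs_vnodeops_template,
            &examplefs_vnodeops));
}
#endif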
/*
 * File and record locking.
 */
/* ARGSUSED */
int
fs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, offset_t offset,
    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
{
        int frcmd;
        int nlmid;
        int error = 0;
        boolean_t skip_lock = B_FALSE;
        flk_callback_t serialize_callback;
        int serialize = 0;
        v_mode_t mode;

        switch (cmd) {

        case F_GETLK:
        case F_O_GETLK:
                if (flag & F_REMOTELOCK) {
                        frcmd = RCMDLCK;
                } else if (flag & F_PXFSLOCK) {
                        frcmd = PCMDLCK;
                } else {
                        frcmd = 0;
                        bfp->l_pid = ttoproc(curthread)->p_pid;
                        bfp->l_sysid = 0;
                }
                break;

        case F_OFD_GETLK:
                /*
                 * TBD we do not support remote OFD locks at this time.
                 */
                if (flag & (F_REMOTELOCK | F_PXFSLOCK)) {
                        error = EINVAL;
                        goto done;
                }
                skip_lock = B_TRUE;
                break;

        case F_SETLK_NBMAND:
                /*
                 * Are NBMAND locks allowed on this file?
                 */
                if (!vp->v_vfsp ||
                    !(vp->v_vfsp->vfs_flag & VFS_NBMAND)) {
                        error = EINVAL;
                        goto done;
                }
                if (vp->v_type != VREG) {
                        error = EINVAL;
                        goto done;
                }
                /*FALLTHROUGH*/

        case F_SETLK:
                if (flag & F_REMOTELOCK) {
                        frcmd = SETFLCK|RCMDLCK;
                } else if (flag & F_PXFSLOCK) {
                        frcmd = SETFLCK|PCMDLCK;
                } else {
                        frcmd = SETFLCK;
                        bfp->l_pid = ttoproc(curthread)->p_pid;
                        bfp->l_sysid = 0;
                }
                if (cmd == F_SETLK_NBMAND &&
                    (bfp->l_type == F_RDLCK || bfp->l_type == F_WRLCK)) {
                        frcmd |= NBMLCK;
                }

                if (nbl_need_check(vp)) {
                        nbl_start_crit(vp, RW_WRITER);
                        serialize = 1;
                        if (frcmd & NBMLCK) {
                                mode = (bfp->l_type == F_RDLCK) ?
                                    V_READ : V_RDANDWR;
                                if (vn_is_mapped(vp, mode)) {
                                        error = EAGAIN;
                                        goto done;
                                }
                        }
                }
                break;

        case F_SETLKW:
                if (flag & F_REMOTELOCK) {
                        frcmd = SETFLCK|SLPFLCK|RCMDLCK;
                } else if (flag & F_PXFSLOCK) {
                        frcmd = SETFLCK|SLPFLCK|PCMDLCK;
                } else {
                        frcmd = SETFLCK|SLPFLCK;
                        bfp->l_pid = ttoproc(curthread)->p_pid;
                        bfp->l_sysid = 0;
                }

                if (nbl_need_check(vp)) {
                        nbl_start_crit(vp, RW_WRITER);
                        serialize = 1;
                }
                break;

        case F_OFD_SETLK:
        case F_OFD_SETLKW:
        case F_FLOCK:
        case F_FLOCKW:
                /*
                 * TBD we do not support remote OFD locks at this time.
                 */
                if (flag & (F_REMOTELOCK | F_PXFSLOCK)) {
                        error = EINVAL;
                        goto done;
                }
                skip_lock = B_TRUE;
                break;

        case F_HASREMOTELOCKS:
                nlmid = GETNLMID(bfp->l_sysid);
                if (nlmid != 0) {       /* booted as a cluster */
                        l_has_rmt(bfp) =
                            cl_flk_has_remote_locks_for_nlmid(vp, nlmid);
                } else {                /* not booted as a cluster */
                        l_has_rmt(bfp) = flk_has_remote_locks(vp);
                }
                goto done;

        default:
                error = EINVAL;
                goto done;
        }

        /*
         * If this is a blocking lock request and we're serializing lock
         * requests, modify the callback list to leave the critical region
         * while we're waiting for the lock.
         */
        if (serialize && (frcmd & SLPFLCK) != 0) {
                flk_add_callback(&serialize_callback,
                    frlock_serialize_blocked, vp, flk_cbp);
                flk_cbp = &serialize_callback;
        }

        if (!skip_lock)
                error = reclock(vp, bfp, frcmd, flag, offset, flk_cbp);

        if (serialize && (frcmd & SLPFLCK) != 0)
                flk_del_callback(&serialize_callback);

done:
        if (serialize)
                nbl_end_crit(vp);

        return (error);
}
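
/*
 * Illustrative sketch (hypothetical "examplefs" name; not part of this
 * file): a file system with no private byte-range locking state can point
 * its VOP_FRLOCK entry at fs_frlock() directly, or wrap it as below when it
 * wants to veto certain requests before the common code runs.
 */
#if 0
static int
examplefs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, flk_callback_t *flk_cbp, cred_t *cr,
    caller_context_t *ct)
{
        /* Common code handles GETLK/SETLK/SETLKW, OFD and remote variants. */
        return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
#endif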
/*
 * Callback when a lock request blocks and we are serializing requests.  If
 * before sleeping, leave the critical region.  If after wakeup, reenter
 * the critical region.
 */
static callb_cpr_t *
frlock_serialize_blocked(flk_cb_when_t when, void *infop)
{
        vnode_t *vp = (vnode_t *)infop;

        if (when == FLK_BEFORE_SLEEP)
                nbl_end_crit(vp);
        else {
                nbl_start_crit(vp, RW_WRITER);
        }
        return (NULL);
}
/*
 * Allow any flags.
 */
/* ARGSUSED */
int
fs_setfl(vnode_t *vp, int oflags, int nflags, cred_t *cr, caller_context_t *ct)
{
        return (0);
}

/*
 * Return the answer requested to poll() for non-device files.
 * Only POLLIN, POLLRDNORM, POLLRDBAND, POLLOUT, and POLLWRBAND are
 * recognized.
 */
struct pollhead fs_pollhd;

/* ARGSUSED */
int
fs_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
        /*
         * Reject all attempts at edge-triggered polling.  These should only
         * occur when regular files are added to a /dev/poll handle which is
         * in epoll mode.  Linux epoll does not allow epoll-ing on regular
         * files at all, so rejecting EPOLLET requests is congruent with
         * those expectations.
         */
        if (events & POLLET) {
                return (EPERM);
        }

        *reventsp = 0;
        if (events & POLLIN)
                *reventsp |= POLLIN;
        if (events & POLLRDNORM)
                *reventsp |= POLLRDNORM;
        if (events & POLLRDBAND)
                *reventsp |= POLLRDBAND;
        if (events & POLLOUT)
                *reventsp |= POLLOUT;
        if (events & POLLWRBAND)
                *reventsp |= POLLWRBAND;

        /*
         * Emitting a pollhead without the intention of issuing pollwakeup()
         * calls against it is a recipe for trouble.  It is only acceptable
         * in this case since the above logic matches practically all useful
         * events.
         */
        if (*reventsp == 0 && !anyyet) {
                *phpp = &fs_pollhd;
        }
        return (0);
}
/*
 * POSIX pathconf() support.
 */
/* ARGSUSED */
int
fs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
        ulong_t val;
        int error = 0;
        struct statvfs64 vfsbuf;

        switch (cmd) {

        case _PC_LINK_MAX:
                val = MAXLINK;
                break;

        case _PC_MAX_CANON:
                val = MAX_CANON;
                break;

        case _PC_MAX_INPUT:
                val = MAX_INPUT;
                break;

        case _PC_NAME_MAX:
                bzero(&vfsbuf, sizeof (vfsbuf));
                if (error = VFS_STATVFS(vp->v_vfsp, &vfsbuf))
                        break;
                val = vfsbuf.f_namemax;
                break;

        case _PC_PATH_MAX:
        case _PC_SYMLINK_MAX:
                val = MAXPATHLEN;
                break;

        case _PC_PIPE_BUF:
                val = PIPE_BUF;
                break;

        case _PC_NO_TRUNC:
                if (vp->v_vfsp->vfs_flag & VFS_NOTRUNC)
                        val = 1;        /* NOTRUNC is enabled for vp */
                else
                        val = (ulong_t)-1;
                break;

        case _PC_VDISABLE:
                val = _POSIX_VDISABLE;
                break;

        case _PC_CHOWN_RESTRICTED:
                if (rstchown)
                        val = rstchown; /* chown restricted enabled */
                else
                        val = (ulong_t)-1;
                break;

        case _PC_FILESIZEBITS:
                /*
                 * If we ever get here, it means that the underlying file
                 * system does not recognise the command and therefore this
                 * configurable limit cannot be determined.  We return -1
                 * and do not change errno.
                 */
                val = (ulong_t)-1;      /* large file support */
                break;

        case _PC_ACL_ENABLED:
                val = 0;
                break;

        case _PC_CASE_BEHAVIOR:
                val = _CASE_SENSITIVE;
                if (vfs_has_feature(vp->v_vfsp, VFSFT_CASEINSENSITIVE) == 1)
                        val |= _CASE_INSENSITIVE;
                if (vfs_has_feature(vp->v_vfsp, VFSFT_NOCASESENSITIVE) == 1)
                        val &= ~_CASE_SENSITIVE;
                break;

        case _PC_SATTR_ENABLED:
        case _PC_SATTR_EXISTS:
                val = 0;
                break;

        case _PC_ACCESS_FILTERING:
                val = 0;
                break;

        default:
                error = EINVAL;
                break;
        }

        if (error == 0)
                *valp = val;
        return (error);
}
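
/*
 * Illustrative sketch (hypothetical "examplefs" name; not part of this
 * file): a file system usually answers only the pathconf queries it has
 * better information for and falls back to fs_pathconf() for the rest.
 */
#if 0
static int
examplefs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
        switch (cmd) {
        case _PC_FILESIZEBITS:
                *valp = 64;             /* this fs supports 64-bit offsets */
                return (0);
        default:
                /* Generic answers for everything else. */
                return (fs_pathconf(vp, cmd, valp, cr, ct));
        }
}
#endif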
/*
 * Dispose of a page.
 */
/* ARGSUSED */
void
fs_dispose(struct vnode *vp, page_t *pp, int fl, int dn, struct cred *cr,
    caller_context_t *ct)
{
        ASSERT(fl == B_FREE || fl == B_INVAL);

        if (fl == B_FREE)
                page_free(pp, dn);
        else
                page_destroy(pp, dn);
}

/* ARGSUSED */
void
fs_nodispose(struct vnode *vp, page_t *pp, int fl, int dn, struct cred *cr,
    caller_context_t *ct)
{
        cmn_err(CE_PANIC, "fs_nodispose invoked");
}
/*
 * Fabricate ACLs for file systems that do not support ACLs.
 */
/* ARGSUSED */
int
fs_fab_acl(vnode_t *vp, vsecattr_t *vsecattr, int flag, cred_t *cr,
    caller_context_t *ct)
{
        aclent_t        *aclentp;
        struct vattr    vattr;
        int             error;
        size_t          aclsize;

        vsecattr->vsa_aclcnt    = 0;
        vsecattr->vsa_aclentsz  = 0;
        vsecattr->vsa_aclentp   = NULL;
        vsecattr->vsa_dfaclcnt  = 0;    /* Default ACLs are not fabricated */
        vsecattr->vsa_dfaclentp = NULL;

        vattr.va_mask = AT_MODE | AT_UID | AT_GID;
        if (error = VOP_GETATTR(vp, &vattr, 0, cr, ct))
                return (error);

        if (vsecattr->vsa_mask & (VSA_ACLCNT | VSA_ACL)) {
                aclsize = 4 * sizeof (aclent_t);
                vsecattr->vsa_aclcnt = 4; /* USER, GROUP, OTHER, and CLASS */
                vsecattr->vsa_aclentp = kmem_zalloc(aclsize, KM_SLEEP);
                aclentp = vsecattr->vsa_aclentp;

                aclentp->a_type = USER_OBJ;     /* Owner */
                aclentp->a_perm = ((ushort_t)(vattr.va_mode & 0700)) >> 6;
                aclentp->a_id = vattr.va_uid;   /* Really undefined */
                aclentp++;

                aclentp->a_type = GROUP_OBJ;    /* Group */
                aclentp->a_perm = ((ushort_t)(vattr.va_mode & 0070)) >> 3;
                aclentp->a_id = vattr.va_gid;   /* Really undefined */
                aclentp++;

                aclentp->a_type = OTHER_OBJ;    /* Other */
                aclentp->a_perm = vattr.va_mode & 0007;
                aclentp->a_id = (gid_t)-1;      /* Really undefined */
                aclentp++;

                aclentp->a_type = CLASS_OBJ;    /* Class */
                aclentp->a_perm = (ushort_t)(0007);
                aclentp->a_id = (gid_t)-1;      /* Really undefined */
        } else if (vsecattr->vsa_mask & (VSA_ACECNT | VSA_ACE)) {
                VERIFY(0 == acl_trivial_create(vattr.va_mode,
                    (vp->v_type == VDIR), (ace_t **)&vsecattr->vsa_aclentp,
                    &vsecattr->vsa_aclcnt));
                vsecattr->vsa_aclentsz = vsecattr->vsa_aclcnt * sizeof (ace_t);
        }

        return (error);
}
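
/*
 * Illustrative sketch (hypothetical "examplefs" name; not part of this
 * file): a file system with no on-disk ACL storage can satisfy
 * VOP_GETSECATTR by fabricating a trivial ACL from the mode bits.
 */
#if 0
static int
examplefs_getsecattr(vnode_t *vp, vsecattr_t *vsap, int flag, cred_t *cr,
    caller_context_t *ct)
{
        /* fs_fab_acl() builds aclent_t or ace_t entries from va_mode. */
        return (fs_fab_acl(vp, vsap, flag, cr, ct));
}
#endif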
/*
 * Common code for implementing DOS share reservations.
 */
/* ARGSUSED4 */
int
fs_shrlock(struct vnode *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
    caller_context_t *ct)
{
        int error;

        /*
         * Make sure that the file was opened with permissions appropriate
         * for the request, and make sure the caller isn't trying to sneak
         * in an NBMAND request.
         */
        if (cmd == F_SHARE) {
                if (((shr->s_access & F_RDACC) && (flag & FREAD) == 0) ||
                    ((shr->s_access & F_WRACC) && (flag & FWRITE) == 0))
                        return (EBADF);
                if (shr->s_access & (F_RMACC | F_MDACC))
                        return (EINVAL);
                if (shr->s_deny & (F_MANDDNY | F_RMDNY))
                        return (EINVAL);
        }
        if (cmd == F_SHARE_NBMAND) {
                /* make sure nbmand is allowed on the file */
                if (!vp->v_vfsp ||
                    !(vp->v_vfsp->vfs_flag & VFS_NBMAND)) {
                        return (EINVAL);
                }
                if (vp->v_type != VREG) {
                        return (EINVAL);
                }
        }

        nbl_start_crit(vp, RW_WRITER);

        switch (cmd) {

        case F_SHARE_NBMAND:
                shr->s_deny |= F_MANDDNY;
                /*FALLTHROUGH*/
        case F_SHARE:
                error = add_share(vp, shr);
                break;

        case F_UNSHARE:
                error = del_share(vp, shr);
                break;

        case F_HASREMOTELOCKS:
                /*
                 * We are overloading this command to refer to remote
                 * shares as well as remote locks, despite its name.
                 */
                shr->s_access = shr_has_remote_shares(vp, shr->s_sysid);
                error = 0;
                break;

        default:
                error = EINVAL;
                break;
        }

        nbl_end_crit(vp);
        return (error);
}
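
/*
 * Illustrative sketch (hypothetical "examplefs" name; not part of this
 * file): share reservations rarely need per-file-system state, so
 * VOP_SHRLOCK is commonly just a thin wrapper around fs_shrlock().
 */
#if 0
static int
examplefs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag,
    cred_t *cr, caller_context_t *ct)
{
        return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
}
#endif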
/*ARGSUSED1*/
int
fs_vnevent_nosupport(vnode_t *vp, vnevent_t e, vnode_t *dvp, char *fnm,
    caller_context_t *ct)
{
        ASSERT(vp != NULL);
        return (ENOTSUP);
}

/*ARGSUSED1*/
int
fs_vnevent_support(vnode_t *vp, vnevent_t e, vnode_t *dvp, char *fnm,
    caller_context_t *ct)
{
        ASSERT(vp != NULL);
        return (0);
}
/*
 * Return 1 for a non-trivial ACL.
 *
 * NB: It is not necessary for the caller to VOP_RWLOCK since
 * we only issue VOP_GETSECATTR.
 *
 * Returns 0 == trivial
 *         1 == NOT trivial
 *        <0 == could not determine
 */
int
fs_acl_nontrivial(vnode_t *vp, cred_t *cr)
{
        ulong_t         acl_styles;
        ulong_t         acl_flavor;
        vsecattr_t      vsecattr;
        int             error;
        int             isnontrivial;

        /* determine the forms of ACLs maintained */
        error = VOP_PATHCONF(vp, _PC_ACL_ENABLED, &acl_styles, cr, NULL);

        /* clear bits we don't understand and establish default acl_style */
        acl_styles &= (_ACL_ACLENT_ENABLED | _ACL_ACE_ENABLED);
        if (error || (acl_styles == 0))
                acl_styles = _ACL_ACLENT_ENABLED;

        vsecattr.vsa_aclentp = NULL;
        vsecattr.vsa_dfaclentp = NULL;
        vsecattr.vsa_aclcnt = 0;
        vsecattr.vsa_dfaclcnt = 0;

        while (acl_styles) {
                /* select one of the styles as current flavor */
                acl_flavor = 0;
                if (acl_styles & _ACL_ACLENT_ENABLED) {
                        acl_flavor = _ACL_ACLENT_ENABLED;
                        vsecattr.vsa_mask = VSA_ACLCNT | VSA_DFACLCNT;
                } else if (acl_styles & _ACL_ACE_ENABLED) {
                        acl_flavor = _ACL_ACE_ENABLED;
                        vsecattr.vsa_mask = VSA_ACECNT | VSA_ACE;
                }

                ASSERT(vsecattr.vsa_mask && acl_flavor);
                error = VOP_GETSECATTR(vp, &vsecattr, 0, cr, NULL);
                if (error == 0)
                        break;

                /* that flavor failed */
                acl_styles &= ~acl_flavor;
        }

        /* if all styles fail then assume trivial */
        if (acl_styles == 0)
                return (0);

        /* process the flavor that worked */
        isnontrivial = 0;
        if (acl_flavor & _ACL_ACLENT_ENABLED) {
                if (vsecattr.vsa_aclcnt > MIN_ACL_ENTRIES)
                        isnontrivial = 1;
                if (vsecattr.vsa_aclcnt && vsecattr.vsa_aclentp != NULL)
                        kmem_free(vsecattr.vsa_aclentp,
                            vsecattr.vsa_aclcnt * sizeof (aclent_t));
                if (vsecattr.vsa_dfaclcnt && vsecattr.vsa_dfaclentp != NULL)
                        kmem_free(vsecattr.vsa_dfaclentp,
                            vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
        }
        if (acl_flavor & _ACL_ACE_ENABLED) {
                isnontrivial = ace_trivial(vsecattr.vsa_aclentp,
                    vsecattr.vsa_aclcnt);

                if (vsecattr.vsa_aclcnt && vsecattr.vsa_aclentp != NULL)
                        kmem_free(vsecattr.vsa_aclentp,
                            vsecattr.vsa_aclcnt * sizeof (ace_t));
                /* ACE has no vsecattr.vsa_dfaclcnt */
        }
        return (isnontrivial);
}
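
/*
 * Illustrative sketch (hypothetical name; not part of this file): how a
 * caller might act on the tri-state result described above.  The vnode and
 * credential are assumed to be held by the caller.
 */
#if 0
static boolean_t
example_has_nontrivial_acl(vnode_t *vp, cred_t *cr)
{
        int rc = fs_acl_nontrivial(vp, cr);

        /* Treat "could not determine" (< 0) conservatively as non-trivial. */
        return (rc != 0 ? B_TRUE : B_FALSE);
}
#endif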
/*
 * Check whether we need a retry to recover from an ESTALE error.
 */
int
fs_need_estale_retry(int retry_count)
{
        if (retry_count < fs_estale_retry)
                return (1);
        else
                return (0);
}
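
/*
 * Illustrative sketch (hypothetical names; not part of this file): the
 * intended use is a bounded retry loop around an operation that may return
 * ESTALE, with the fs_estale_retry tunable as the bound.
 */
#if 0
static int
example_lookup_with_estale_retry(vnode_t *dvp, char *nm, vnode_t **vpp,
    cred_t *cr)
{
        int error;
        int retries = 0;

        do {
                error = VOP_LOOKUP(dvp, nm, vpp, NULL, 0, NULL, cr,
                    NULL, NULL, NULL);
        } while (error == ESTALE && fs_need_estale_retry(retries++));

        return (error);
}
#endif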
static int (*fs_av_scan)(vnode_t *, cred_t *, int) = NULL;

/*
 * Routine for the anti-virus scanner to call to register its scanning
 * routine.
 */
void
fs_vscan_register(int (*av_scan)(vnode_t *, cred_t *, int))
{
        fs_av_scan = av_scan;
}

/*
 * Routine for file systems to call to initiate anti-virus scanning.
 * Scanning will only be done on REGular files (currently).
 */
int
fs_vscan(vnode_t *vp, cred_t *cr, int async)
{
        int ret = 0;

        if (fs_av_scan && vp->v_type == VREG)
                ret = (*fs_av_scan)(vp, cr, async);

        return (ret);
}
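
/*
 * Illustrative sketch (hypothetical "examplefs" name; not part of this
 * file): a file system that participates in anti-virus scanning calls
 * fs_vscan() from its open path and refuses access when the scanner
 * reports that the file must not be opened.
 */
#if 0
static int
examplefs_open_vscan(vnode_t *vp, cred_t *cr)
{
        /* Only regular files are scanned; synchronous scan is requested. */
        if (fs_vscan(vp, cr, 0) != 0)
                return (EACCES);

        return (0);
}
#endif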
/*
 * Support functions for reparse points.
 */

/*
 * reparse_vnode_parse
 *
 * Read the symlink data of the reparse point specified by the vnode
 * and return the reparse data as name-value pairs in the nvlist.
 */
int
reparse_vnode_parse(vnode_t *vp, nvlist_t *nvl)
{
        int err;
        char *lkdata;
        struct uio uio;
        struct iovec iov;

        if (vp == NULL || nvl == NULL)
                return (EINVAL);

        lkdata = kmem_alloc(MAXREPARSELEN, KM_SLEEP);

        /*
         * Set up the I/O vector to read the symlink data.
         */
        iov.iov_base = lkdata;
        iov.iov_len = MAXREPARSELEN;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_extflg = UIO_COPY_CACHED;
        uio.uio_loffset = (offset_t)0;
        uio.uio_resid = MAXREPARSELEN;

        if ((err = VOP_READLINK(vp, &uio, kcred, NULL)) == 0) {
                *(lkdata + MAXREPARSELEN - uio.uio_resid) = '\0';
                err = reparse_parse(lkdata, nvl);
        }
        kmem_free(lkdata, MAXREPARSELEN);       /* done with lkdata */

        return (err);
}
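
/*
 * Illustrative sketch (hypothetical name; not part of this file): a caller
 * supplies an allocated nvlist, which reparse_vnode_parse() fills with the
 * service-type/service-data pairs encoded in the reparse point.
 */
#if 0
static int
example_read_reparse_data(vnode_t *vp)
{
        nvlist_t *nvl;
        int err;

        if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) != 0)
                return (ENOMEM);

        err = reparse_vnode_parse(vp, nvl);
        if (err == 0) {
                /* ... walk the nvlist with nvlist_next_nvpair() ... */
        }
        nvlist_free(nvl);
        return (err);
}
#endif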
void
reparse_point_init()
{
        mutex_init(&reparsed_door_lock, NULL, MUTEX_DEFAULT, NULL);
}

static door_handle_t
reparse_door_get_handle()
{
        door_handle_t dh;

        mutex_enter(&reparsed_door_lock);
        if ((dh = reparsed_door) == NULL) {
                if (door_ki_open(REPARSED_DOOR, &reparsed_door) != 0) {
                        reparsed_door = NULL;
                        dh = NULL;
                } else
                        dh = reparsed_door;
        }
        mutex_exit(&reparsed_door_lock);
        return (dh);
}

static void
reparse_door_reset_handle()
{
        mutex_enter(&reparsed_door_lock);
        reparsed_door = NULL;
        mutex_exit(&reparsed_door_lock);
}
/*
 * reparse_kderef
 *
 * Accepts the service-specific item from the reparse point and returns
 * the service-specific data requested.  The caller specifies the size of
 * the buffer provided via *bufsize; the routine will fail with EOVERFLOW
 * if the results will not fit in the buffer, in which case *bufsize will
 * contain the number of bytes needed to hold the results.
 *
 * If OK, return 0 and update *bufsize with the length of the actual result;
 * otherwise return an error code.
 */
int
reparse_kderef(const char *svc_type, const char *svc_data, char *buf,
    size_t *bufsize)
{
        int err, retries, need_free, retried_doorhd;
        size_t dlen, res_len;
        char *darg;
        door_arg_t door_args;
        reparsed_door_res_t *resp;
        door_handle_t rp_door;

        if (svc_type == NULL || svc_data == NULL || buf == NULL ||
            bufsize == NULL)
                return (EINVAL);

        /* get reparsed's door handle */
        if ((rp_door = reparse_door_get_handle()) == NULL)
                return (EBADF);

        /* setup buffer for door_call args and results */
        dlen = strlen(svc_type) + strlen(svc_data) + 2;
        if (*bufsize < dlen) {
                darg = kmem_alloc(dlen, KM_SLEEP);
                need_free = 1;
        } else {
                darg = buf;     /* use same buffer for door's args & results */
                need_free = 0;
        }

        /* build argument string of door call */
        (void) snprintf(darg, dlen, "%s:%s", svc_type, svc_data);

        /* setup args for door call */
        door_args.data_ptr = darg;
        door_args.data_size = dlen;
        door_args.desc_ptr = NULL;
        door_args.desc_num = 0;
        door_args.rbuf = buf;
        door_args.rsize = *bufsize;

        /* do the door_call */
        retried_doorhd = 0;
        retries = 0;
        door_ki_hold(rp_door);
        while ((err = door_ki_upcall_limited(rp_door, &door_args,
            NULL, SIZE_MAX, 0)) != 0) {
                if (err == EAGAIN || err == EINTR) {
                        if (++retries < REPARSED_DOORCALL_MAX_RETRY) {
                                delay(SEC_TO_TICK(1));
                                continue;
                        }
                } else if (err == EBADF) {
                        /* door server goes away... */
                        reparse_door_reset_handle();

                        if (retried_doorhd == 0) {
                                door_ki_rele(rp_door);
                                retried_doorhd++;
                                rp_door = reparse_door_get_handle();
                                if (rp_door != NULL) {
                                        door_ki_hold(rp_door);
                                        continue;
                                }
                        }
                }
                break;
        }

        if (rp_door)
                door_ki_rele(rp_door);

        if (need_free)
                kmem_free(darg, dlen);          /* done with args buffer */

        if (err != 0)
                return (err);

        resp = (reparsed_door_res_t *)door_args.rbuf;
        if ((err = resp->res_status) == 0) {
                /*
                 * Have to save the length of the results before the
                 * bcopy below, since it can be an overlapping copy that
                 * overwrites the reparsed_door_res_t structure at
                 * the beginning of the buffer.
                 */
                res_len = (size_t)resp->res_len;

                /* deref call is ok */
                if (res_len > *bufsize)
                        err = EOVERFLOW;
                else
                        bcopy(resp->res_data, buf, res_len);
                *bufsize = res_len;
        }
        if (door_args.rbuf != buf)
                kmem_free(door_args.rbuf, door_args.rsize);

        return (err);
}
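
/*
 * Illustrative sketch (hypothetical names; not part of this file): a caller
 * that starts with a small buffer and, on EOVERFLOW, retries once with the
 * size reported back in *bufsize.
 */
#if 0
static int
example_kderef(const char *svc_type, const char *svc_data)
{
        size_t allocsz = 256;
        size_t bufsize = allocsz;
        char *buf = kmem_alloc(allocsz, KM_SLEEP);
        int err;

        err = reparse_kderef(svc_type, svc_data, buf, &bufsize);
        if (err == EOVERFLOW) {
                /* bufsize now holds the number of bytes needed. */
                kmem_free(buf, allocsz);
                allocsz = bufsize;
                buf = kmem_alloc(allocsz, KM_SLEEP);
                err = reparse_kderef(svc_type, svc_data, buf, &bufsize);
        }

        /* On success, buf holds bufsize bytes of service-specific data. */
        kmem_free(buf, allocsz);
        return (err);
}
#endif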