4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
28 * ZFS control directory (a.k.a. ".zfs")
30 * This directory provides a common location for all ZFS meta-objects.
31 * Currently, this is only the 'snapshot' directory, but this may expand in the
32 * future. The elements are built using the GFS primitives, as the hierarchy
33 * does not actually exist on disk.
35 * For 'snapshot', we don't want to have all snapshots always mounted, because
36 * this would take up a huge amount of space in /etc/mnttab. We have three
39 * ctldir ------> snapshotdir -------> snapshot
45 * The 'snapshot' node contains just enough information to lookup '..' and act
46 * as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
47 * perform an automount of the underlying filesystem and return the
48 * corresponding vnode.
50 * All mounts are handled automatically by the kernel, but unmounts are
51 * (currently) handled from user land. The main reason is that there is no
52 * reliable way to auto-unmount the filesystem when it's "no longer in use".
53 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
54 * unmounts any snapshots within the snapshot directory.
56 * The '.zfs', '.zfs/snapshot', and all directories created under
57 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
58 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
60 * File systems mounted ontop of the GFS nodes '.zfs/snapshot/<snapname>'
61 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
62 * However, vnodes within these mounted on file systems have their v_vfsp
63 * fields set to the head filesystem to make NFS happy (see
64 * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
65 * so that it cannot be freed until all snapshots have been unmounted.
68 #include <fs/fs_subr.h>
69 #include <sys/zfs_ctldir.h>
70 #include <sys/zfs_ioctl.h>
71 #include <sys/zfs_vfsops.h>
72 #include <sys/vfs_opreg.h>
76 #include <sys/dsl_destroy.h>
77 #include <sys/dsl_deleg.h>
78 #include <sys/mount.h>
79 #include <sys/sunddi.h>
81 #include "zfs_namecheck.h"
83 typedef struct zfsctl_node
{
84 gfs_dir_t zc_gfs_private
;
86 timestruc_t zc_cmtime
; /* ctime and mtime, always the same */
89 typedef struct zfsctl_snapdir
{
90 zfsctl_node_t sd_node
;
102 snapentry_compare(const void *a
, const void *b
)
104 const zfs_snapentry_t
*sa
= a
;
105 const zfs_snapentry_t
*sb
= b
;
106 int ret
= strcmp(sa
->se_name
, sb
->se_name
);
116 vnodeops_t
*zfsctl_ops_root
;
117 vnodeops_t
*zfsctl_ops_snapdir
;
118 vnodeops_t
*zfsctl_ops_snapshot
;
119 vnodeops_t
*zfsctl_ops_shares
;
121 static const fs_operation_def_t zfsctl_tops_root
[];
122 static const fs_operation_def_t zfsctl_tops_snapdir
[];
123 static const fs_operation_def_t zfsctl_tops_snapshot
[];
124 static const fs_operation_def_t zfsctl_tops_shares
[];
126 static vnode_t
*zfsctl_mknode_snapdir(vnode_t
*);
127 static vnode_t
*zfsctl_mknode_shares(vnode_t
*);
128 static vnode_t
*zfsctl_snapshot_mknode(vnode_t
*, uint64_t objset
);
129 static int zfsctl_unmount_snap(zfs_snapentry_t
*, int, cred_t
*);
131 static gfs_opsvec_t zfsctl_opsvec
[] = {
132 { ".zfs", zfsctl_tops_root
, &zfsctl_ops_root
},
133 { ".zfs/snapshot", zfsctl_tops_snapdir
, &zfsctl_ops_snapdir
},
134 { ".zfs/snapshot/vnode", zfsctl_tops_snapshot
, &zfsctl_ops_snapshot
},
135 { ".zfs/shares", zfsctl_tops_shares
, &zfsctl_ops_shares
},
140 * Root directory elements. We only have two entries
141 * snapshot and shares.
143 static gfs_dirent_t zfsctl_root_entries
[] = {
144 { "snapshot", zfsctl_mknode_snapdir
, GFS_CACHE_VNODE
},
145 { "shares", zfsctl_mknode_shares
, GFS_CACHE_VNODE
},
149 /* include . and .. in the calculation */
150 #define NROOT_ENTRIES ((sizeof (zfsctl_root_entries) / \
151 sizeof (gfs_dirent_t)) + 1)
155 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
156 * directories. This is called from the ZFS init routine, and initializes the
157 * vnode ops vectors that we'll be using.
162 VERIFY(gfs_make_opsvec(zfsctl_opsvec
) == 0);
169 * Remove vfsctl vnode ops
172 vn_freevnodeops(zfsctl_ops_root
);
173 if (zfsctl_ops_snapdir
)
174 vn_freevnodeops(zfsctl_ops_snapdir
);
175 if (zfsctl_ops_snapshot
)
176 vn_freevnodeops(zfsctl_ops_snapshot
);
177 if (zfsctl_ops_shares
)
178 vn_freevnodeops(zfsctl_ops_shares
);
180 zfsctl_ops_root
= NULL
;
181 zfsctl_ops_snapdir
= NULL
;
182 zfsctl_ops_snapshot
= NULL
;
183 zfsctl_ops_shares
= NULL
;
187 zfsctl_is_node(vnode_t
*vp
)
189 return (vn_matchops(vp
, zfsctl_ops_root
) ||
190 vn_matchops(vp
, zfsctl_ops_snapdir
) ||
191 vn_matchops(vp
, zfsctl_ops_snapshot
) ||
192 vn_matchops(vp
, zfsctl_ops_shares
));
197 * Return the inode number associated with the 'snapshot' or
198 * 'shares' directory.
202 zfsctl_root_inode_cb(vnode_t
*vp
, int index
)
204 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
209 return (ZFSCTL_INO_SNAPDIR
);
211 return (zfsvfs
->z_shares_dir
);
215 * Create the '.zfs' directory. This directory is cached as part of the VFS
216 * structure. This results in a hold on the vfs_t. The code in zfs_umount()
217 * therefore checks against a vfs_count of 2 instead of 1. This reference
218 * is removed when the ctldir is destroyed in the unmount.
221 zfsctl_create(zfsvfs_t
*zfsvfs
)
227 ASSERT(zfsvfs
->z_ctldir
== NULL
);
229 vp
= gfs_root_create(sizeof (zfsctl_node_t
), zfsvfs
->z_vfs
,
230 zfsctl_ops_root
, ZFSCTL_INO_ROOT
, zfsctl_root_entries
,
231 zfsctl_root_inode_cb
, MAXNAMELEN
, NULL
, NULL
);
233 zcp
->zc_id
= ZFSCTL_INO_ROOT
;
235 VERIFY(VFS_ROOT(zfsvfs
->z_vfs
, &rvp
) == 0);
236 VERIFY(0 == sa_lookup(VTOZ(rvp
)->z_sa_hdl
, SA_ZPL_CRTIME(zfsvfs
),
237 &crtime
, sizeof (crtime
)));
238 ZFS_TIME_DECODE(&zcp
->zc_cmtime
, crtime
);
242 * We're only faking the fact that we have a root of a filesystem for
243 * the sake of the GFS interfaces. Undo the flag manipulation it did
246 vp
->v_flag
&= ~(VROOT
| VNOCACHE
| VNOMAP
| VNOSWAP
| VNOMOUNT
);
248 zfsvfs
->z_ctldir
= vp
;
252 * Destroy the '.zfs' directory. Only called when the filesystem is unmounted.
253 * There might still be more references if we were force unmounted, but only
254 * new zfs_inactive() calls can occur and they don't reference .zfs
257 zfsctl_destroy(zfsvfs_t
*zfsvfs
)
259 VN_RELE(zfsvfs
->z_ctldir
);
260 zfsvfs
->z_ctldir
= NULL
;
264 * Given a root znode, retrieve the associated .zfs directory.
265 * Add a hold to the vnode and return it.
268 zfsctl_root(znode_t
*zp
)
270 ASSERT(zfs_has_ctldir(zp
));
271 VN_HOLD(zp
->z_zfsvfs
->z_ctldir
);
272 return (zp
->z_zfsvfs
->z_ctldir
);
276 * Common open routine. Disallow any write access.
280 zfsctl_common_open(vnode_t
**vpp
, int flags
, cred_t
*cr
, caller_context_t
*ct
)
283 return (SET_ERROR(EACCES
));
289 * Common close routine. Nothing to do here.
293 zfsctl_common_close(vnode_t
*vpp
, int flags
, int count
, offset_t off
,
294 cred_t
*cr
, caller_context_t
*ct
)
300 * Common access routine. Disallow writes.
304 zfsctl_common_access(vnode_t
*vp
, int mode
, int flags
, cred_t
*cr
,
305 caller_context_t
*ct
)
307 if (flags
& V_ACE_MASK
) {
308 if (mode
& ACE_ALL_WRITE_PERMS
)
309 return (SET_ERROR(EACCES
));
312 return (SET_ERROR(EACCES
));
319 * Common getattr function. Fill in basic information.
322 zfsctl_common_getattr(vnode_t
*vp
, vattr_t
*vap
)
330 * We are a purely virtual object, so we have no
331 * blocksize or allocated blocks.
336 vap
->va_fsid
= vp
->v_vfsp
->vfs_dev
;
337 vap
->va_mode
= S_IRUSR
| S_IXUSR
| S_IRGRP
| S_IXGRP
|
341 * We live in the now (for atime).
349 zfsctl_common_fid(vnode_t
*vp
, fid_t
*fidp
, caller_context_t
*ct
)
351 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
352 zfsctl_node_t
*zcp
= vp
->v_data
;
353 uint64_t object
= zcp
->zc_id
;
359 if (fidp
->fid_len
< SHORT_FID_LEN
) {
360 fidp
->fid_len
= SHORT_FID_LEN
;
362 return (SET_ERROR(ENOSPC
));
365 zfid
= (zfid_short_t
*)fidp
;
367 zfid
->zf_len
= SHORT_FID_LEN
;
369 for (i
= 0; i
< sizeof (zfid
->zf_object
); i
++)
370 zfid
->zf_object
[i
] = (uint8_t)(object
>> (8 * i
));
372 /* .zfs znodes always have a generation number of 0 */
373 for (i
= 0; i
< sizeof (zfid
->zf_gen
); i
++)
383 zfsctl_shares_fid(vnode_t
*vp
, fid_t
*fidp
, caller_context_t
*ct
)
385 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
391 if (zfsvfs
->z_shares_dir
== 0) {
393 return (SET_ERROR(ENOTSUP
));
396 if ((error
= zfs_zget(zfsvfs
, zfsvfs
->z_shares_dir
, &dzp
)) == 0) {
397 error
= VOP_FID(ZTOV(dzp
), fidp
, ct
);
/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */
#define	ZFSCTL_INO_SNAP(id)	(id)
419 * Get root directory attributes.
423 zfsctl_root_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
424 caller_context_t
*ct
)
426 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
427 zfsctl_node_t
*zcp
= vp
->v_data
;
430 vap
->va_nodeid
= ZFSCTL_INO_ROOT
;
431 vap
->va_nlink
= vap
->va_size
= NROOT_ENTRIES
;
432 vap
->va_mtime
= vap
->va_ctime
= zcp
->zc_cmtime
;
434 zfsctl_common_getattr(vp
, vap
);
441 * Special case the handling of "..".
445 zfsctl_root_lookup(vnode_t
*dvp
, char *nm
, vnode_t
**vpp
, pathname_t
*pnp
,
446 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
447 int *direntflags
, pathname_t
*realpnp
)
449 zfsvfs_t
*zfsvfs
= dvp
->v_vfsp
->vfs_data
;
453 * No extended attributes allowed under .zfs
455 if (flags
& LOOKUP_XATTR
)
456 return (SET_ERROR(EINVAL
));
460 if (strcmp(nm
, "..") == 0) {
461 err
= VFS_ROOT(dvp
->v_vfsp
, vpp
);
463 err
= gfs_vop_lookup(dvp
, nm
, vpp
, pnp
, flags
, rdir
,
464 cr
, ct
, direntflags
, realpnp
);
473 zfsctl_pathconf(vnode_t
*vp
, int cmd
, ulong_t
*valp
, cred_t
*cr
,
474 caller_context_t
*ct
)
477 * We only care about ACL_ENABLED so that libsec can
478 * display ACL correctly and not default to POSIX draft.
480 if (cmd
== _PC_ACL_ENABLED
) {
481 *valp
= _ACL_ACE_ENABLED
;
485 return (fs_pathconf(vp
, cmd
, valp
, cr
, ct
));
488 static const fs_operation_def_t zfsctl_tops_root
[] = {
489 { VOPNAME_OPEN
, { .vop_open
= zfsctl_common_open
} },
490 { VOPNAME_CLOSE
, { .vop_close
= zfsctl_common_close
} },
491 { VOPNAME_IOCTL
, { .error
= fs_inval
} },
492 { VOPNAME_GETATTR
, { .vop_getattr
= zfsctl_root_getattr
} },
493 { VOPNAME_ACCESS
, { .vop_access
= zfsctl_common_access
} },
494 { VOPNAME_READDIR
, { .vop_readdir
= gfs_vop_readdir
} },
495 { VOPNAME_LOOKUP
, { .vop_lookup
= zfsctl_root_lookup
} },
496 { VOPNAME_SEEK
, { .vop_seek
= fs_seek
} },
497 { VOPNAME_INACTIVE
, { .vop_inactive
= gfs_vop_inactive
} },
498 { VOPNAME_PATHCONF
, { .vop_pathconf
= zfsctl_pathconf
} },
499 { VOPNAME_FID
, { .vop_fid
= zfsctl_common_fid
} },
504 * Gets the full dataset name that corresponds to the given snapshot name
506 * zfsctl_snapshot_zname("snap1") -> "mypool/myfs@snap1"
509 zfsctl_snapshot_zname(vnode_t
*vp
, const char *name
, int len
, char *zname
)
511 objset_t
*os
= ((zfsvfs_t
*)((vp
)->v_vfsp
->vfs_data
))->z_os
;
513 if (zfs_component_namecheck(name
, NULL
, NULL
) != 0)
514 return (SET_ERROR(EILSEQ
));
515 dmu_objset_name(os
, zname
);
516 if (strlen(zname
) + 1 + strlen(name
) >= len
)
517 return (SET_ERROR(ENAMETOOLONG
));
518 (void) strcat(zname
, "@");
519 (void) strcat(zname
, name
);
524 zfsctl_unmount_snap(zfs_snapentry_t
*sep
, int fflags
, cred_t
*cr
)
526 vnode_t
*svp
= sep
->se_root
;
529 ASSERT(vn_ismntpt(svp
));
531 /* this will be dropped by dounmount() */
532 if ((error
= vn_vfswlock(svp
)) != 0)
536 error
= dounmount(vn_mountedvfs(svp
), fflags
, cr
);
543 * We can't use VN_RELE(), as that will try to invoke
544 * zfsctl_snapdir_inactive(), which would cause us to destroy
545 * the sd_lock mutex held by our caller.
547 ASSERT(svp
->v_count
== 1);
548 gfs_vop_inactive(svp
, cr
, NULL
);
550 kmem_free(sep
->se_name
, strlen(sep
->se_name
) + 1);
551 kmem_free(sep
, sizeof (zfs_snapentry_t
));
557 zfsctl_rename_snap(zfsctl_snapdir_t
*sdp
, zfs_snapentry_t
*sep
, const char *nm
)
562 char newpath
[MAXNAMELEN
];
565 ASSERT(MUTEX_HELD(&sdp
->sd_lock
));
568 vfsp
= vn_mountedvfs(sep
->se_root
);
569 ASSERT(vfsp
!= NULL
);
574 * Change the name in the AVL tree.
576 avl_remove(&sdp
->sd_snaps
, sep
);
577 kmem_free(sep
->se_name
, strlen(sep
->se_name
) + 1);
578 sep
->se_name
= kmem_alloc(strlen(nm
) + 1, KM_SLEEP
);
579 (void) strcpy(sep
->se_name
, nm
);
580 VERIFY(avl_find(&sdp
->sd_snaps
, sep
, &where
) == NULL
);
581 avl_insert(&sdp
->sd_snaps
, sep
, where
);
584 * Change the current mountpoint info:
585 * - update the tail of the mntpoint path
586 * - update the tail of the resource path
588 pathref
= vfs_getmntpoint(vfsp
);
589 (void) strncpy(newpath
, refstr_value(pathref
), sizeof (newpath
));
590 VERIFY((tail
= strrchr(newpath
, '/')) != NULL
);
592 ASSERT3U(strlen(newpath
) + strlen(nm
), <, sizeof (newpath
));
593 (void) strcat(newpath
, nm
);
594 refstr_rele(pathref
);
595 vfs_setmntpoint(vfsp
, newpath
, 0);
597 pathref
= vfs_getresource(vfsp
);
598 (void) strncpy(newpath
, refstr_value(pathref
), sizeof (newpath
));
599 VERIFY((tail
= strrchr(newpath
, '@')) != NULL
);
601 ASSERT3U(strlen(newpath
) + strlen(nm
), <, sizeof (newpath
));
602 (void) strcat(newpath
, nm
);
603 refstr_rele(pathref
);
604 vfs_setresource(vfsp
, newpath
, 0);
611 zfsctl_snapdir_rename(vnode_t
*sdvp
, char *snm
, vnode_t
*tdvp
, char *tnm
,
612 cred_t
*cr
, caller_context_t
*ct
, int flags
)
614 zfsctl_snapdir_t
*sdp
= sdvp
->v_data
;
615 zfs_snapentry_t search
, *sep
;
618 char from
[ZFS_MAX_DATASET_NAME_LEN
], to
[ZFS_MAX_DATASET_NAME_LEN
];
619 char real
[ZFS_MAX_DATASET_NAME_LEN
], fsname
[ZFS_MAX_DATASET_NAME_LEN
];
622 zfsvfs
= sdvp
->v_vfsp
->vfs_data
;
625 if ((flags
& FIGNORECASE
) || zfsvfs
->z_case
== ZFS_CASE_INSENSITIVE
) {
626 err
= dmu_snapshot_realname(zfsvfs
->z_os
, snm
, real
,
627 sizeof (real
), NULL
);
630 } else if (err
!= ENOTSUP
) {
638 dmu_objset_name(zfsvfs
->z_os
, fsname
);
640 err
= zfsctl_snapshot_zname(sdvp
, snm
, sizeof (from
), from
);
642 err
= zfsctl_snapshot_zname(tdvp
, tnm
, sizeof (to
), to
);
644 err
= zfs_secpolicy_rename_perms(from
, to
, cr
);
649 * Cannot move snapshots out of the snapdir.
652 return (SET_ERROR(EINVAL
));
654 if (strcmp(snm
, tnm
) == 0)
657 mutex_enter(&sdp
->sd_lock
);
659 search
.se_name
= (char *)snm
;
660 if ((sep
= avl_find(&sdp
->sd_snaps
, &search
, &where
)) == NULL
) {
661 mutex_exit(&sdp
->sd_lock
);
662 return (SET_ERROR(ENOENT
));
665 err
= dsl_dataset_rename_snapshot(fsname
, snm
, tnm
, B_FALSE
);
667 zfsctl_rename_snap(sdp
, sep
, tnm
);
669 mutex_exit(&sdp
->sd_lock
);
676 zfsctl_snapdir_remove(vnode_t
*dvp
, char *name
, vnode_t
*cwd
, cred_t
*cr
,
677 caller_context_t
*ct
, int flags
)
679 zfsctl_snapdir_t
*sdp
= dvp
->v_data
;
680 zfs_snapentry_t
*sep
;
681 zfs_snapentry_t search
;
683 char snapname
[ZFS_MAX_DATASET_NAME_LEN
];
684 char real
[ZFS_MAX_DATASET_NAME_LEN
];
687 zfsvfs
= dvp
->v_vfsp
->vfs_data
;
690 if ((flags
& FIGNORECASE
) || zfsvfs
->z_case
== ZFS_CASE_INSENSITIVE
) {
692 err
= dmu_snapshot_realname(zfsvfs
->z_os
, name
, real
,
693 sizeof (real
), NULL
);
696 } else if (err
!= ENOTSUP
) {
704 err
= zfsctl_snapshot_zname(dvp
, name
, sizeof (snapname
), snapname
);
706 err
= zfs_secpolicy_destroy_perms(snapname
, cr
);
710 mutex_enter(&sdp
->sd_lock
);
712 search
.se_name
= name
;
713 sep
= avl_find(&sdp
->sd_snaps
, &search
, NULL
);
715 avl_remove(&sdp
->sd_snaps
, sep
);
716 err
= zfsctl_unmount_snap(sep
, MS_FORCE
, cr
);
718 avl_add(&sdp
->sd_snaps
, sep
);
720 err
= dsl_destroy_snapshot(snapname
, B_FALSE
);
722 err
= SET_ERROR(ENOENT
);
725 mutex_exit(&sdp
->sd_lock
);
731 * This creates a snapshot under '.zfs/snapshot'.
735 zfsctl_snapdir_mkdir(vnode_t
*dvp
, char *dirname
, vattr_t
*vap
, vnode_t
**vpp
,
736 cred_t
*cr
, caller_context_t
*cc
, int flags
, vsecattr_t
*vsecp
)
738 zfsvfs_t
*zfsvfs
= dvp
->v_vfsp
->vfs_data
;
739 char name
[ZFS_MAX_DATASET_NAME_LEN
];
741 static enum symfollow follow
= NO_FOLLOW
;
742 static enum uio_seg seg
= UIO_SYSSPACE
;
744 if (zfs_component_namecheck(dirname
, NULL
, NULL
) != 0)
745 return (SET_ERROR(EILSEQ
));
747 dmu_objset_name(zfsvfs
->z_os
, name
);
751 err
= zfs_secpolicy_snapshot_perms(name
, cr
);
756 err
= dmu_objset_snapshot_one(name
, dirname
);
759 err
= lookupnameat(dirname
, seg
, follow
, NULL
, vpp
, dvp
);
766 * Lookup entry point for the 'snapshot' directory. Try to open the
767 * snapshot if it exist, creating the pseudo filesystem vnode as necessary.
768 * Perform a mount of the associated dataset on top of the vnode.
772 zfsctl_snapdir_lookup(vnode_t
*dvp
, char *nm
, vnode_t
**vpp
, pathname_t
*pnp
,
773 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
774 int *direntflags
, pathname_t
*realpnp
)
776 zfsctl_snapdir_t
*sdp
= dvp
->v_data
;
778 char snapname
[ZFS_MAX_DATASET_NAME_LEN
];
779 char real
[ZFS_MAX_DATASET_NAME_LEN
];
781 zfs_snapentry_t
*sep
, search
;
784 size_t mountpoint_len
;
786 zfsvfs_t
*zfsvfs
= dvp
->v_vfsp
->vfs_data
;
790 * No extended attributes allowed under .zfs
792 if (flags
& LOOKUP_XATTR
)
793 return (SET_ERROR(EINVAL
));
795 ASSERT(dvp
->v_type
== VDIR
);
798 * If we get a recursive call, that means we got called
799 * from the domount() code while it was trying to look up the
800 * spec (which looks like a local path for zfs). We need to
801 * add some flag to domount() to tell it not to do this lookup.
803 if (MUTEX_HELD(&sdp
->sd_lock
))
804 return (SET_ERROR(ENOENT
));
808 if (gfs_lookup_dot(vpp
, dvp
, zfsvfs
->z_ctldir
, nm
) == 0) {
813 if (flags
& FIGNORECASE
) {
814 boolean_t conflict
= B_FALSE
;
816 err
= dmu_snapshot_realname(zfsvfs
->z_os
, nm
, real
,
817 sizeof (real
), &conflict
);
820 } else if (err
!= ENOTSUP
) {
825 (void) strlcpy(realpnp
->pn_buf
, nm
,
826 realpnp
->pn_bufsize
);
827 if (conflict
&& direntflags
)
828 *direntflags
= ED_CASE_CONFLICT
;
831 mutex_enter(&sdp
->sd_lock
);
832 search
.se_name
= (char *)nm
;
833 if ((sep
= avl_find(&sdp
->sd_snaps
, &search
, &where
)) != NULL
) {
840 } else if (*vpp
== sep
->se_root
) {
842 * The snapshot was unmounted behind our backs,
848 * VROOT was set during the traverse call. We need
849 * to clear it since we're pretending to be part
850 * of our parent's vfs.
852 (*vpp
)->v_flag
&= ~VROOT
;
854 mutex_exit(&sdp
->sd_lock
);
860 * The requested snapshot is not currently mounted, look it up.
862 err
= zfsctl_snapshot_zname(dvp
, nm
, sizeof (snapname
), snapname
);
864 mutex_exit(&sdp
->sd_lock
);
867 * handle "ls *" or "?" in a graceful manner,
868 * forcing EILSEQ to ENOENT.
869 * Since shell ultimately passes "*" or "?" as name to lookup
871 return (err
== EILSEQ
? ENOENT
: err
);
873 if (dmu_objset_hold(snapname
, FTAG
, &snap
) != 0) {
874 mutex_exit(&sdp
->sd_lock
);
876 return (SET_ERROR(ENOENT
));
879 sep
= kmem_alloc(sizeof (zfs_snapentry_t
), KM_SLEEP
);
880 sep
->se_name
= kmem_alloc(strlen(nm
) + 1, KM_SLEEP
);
881 (void) strcpy(sep
->se_name
, nm
);
882 *vpp
= sep
->se_root
= zfsctl_snapshot_mknode(dvp
, dmu_objset_id(snap
));
883 avl_insert(&sdp
->sd_snaps
, sep
, where
);
885 dmu_objset_rele(snap
, FTAG
);
887 mountpoint_len
= strlen(refstr_value(dvp
->v_vfsp
->vfs_mntpt
)) +
888 strlen("/.zfs/snapshot/") + strlen(nm
) + 1;
889 mountpoint
= kmem_alloc(mountpoint_len
, KM_SLEEP
);
890 (void) snprintf(mountpoint
, mountpoint_len
, "%s/.zfs/snapshot/%s",
891 refstr_value(dvp
->v_vfsp
->vfs_mntpt
), nm
);
893 margs
.spec
= snapname
;
894 margs
.dir
= mountpoint
;
895 margs
.flags
= MS_SYSSPACE
| MS_NOMNTTAB
;
896 margs
.fstype
= "zfs";
897 margs
.dataptr
= NULL
;
902 err
= domount("zfs", &margs
, *vpp
, kcred
, &vfsp
);
903 kmem_free(mountpoint
, mountpoint_len
);
907 * Return the mounted root rather than the covered mount point.
908 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
909 * the ZFS vnode mounted on top of the GFS node. This ZFS
910 * vnode is the root of the newly created vfsp.
918 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
920 * This is where we lie about our v_vfsp in order to
921 * make .zfs/snapshot/<snapname> accessible over NFS
922 * without requiring manual mounts of <snapname>.
924 ASSERT(VTOZ(*vpp
)->z_zfsvfs
!= zfsvfs
);
925 VTOZ(*vpp
)->z_zfsvfs
->z_parent
= zfsvfs
;
926 (*vpp
)->v_vfsp
= zfsvfs
->z_vfs
;
927 (*vpp
)->v_flag
&= ~VROOT
;
929 mutex_exit(&sdp
->sd_lock
);
933 * If we had an error, drop our hold on the vnode and
934 * zfsctl_snapshot_inactive() will clean up.
945 zfsctl_shares_lookup(vnode_t
*dvp
, char *nm
, vnode_t
**vpp
, pathname_t
*pnp
,
946 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
947 int *direntflags
, pathname_t
*realpnp
)
949 zfsvfs_t
*zfsvfs
= dvp
->v_vfsp
->vfs_data
;
955 if (gfs_lookup_dot(vpp
, dvp
, zfsvfs
->z_ctldir
, nm
) == 0) {
960 if (zfsvfs
->z_shares_dir
== 0) {
962 return (SET_ERROR(ENOTSUP
));
964 if ((error
= zfs_zget(zfsvfs
, zfsvfs
->z_shares_dir
, &dzp
)) == 0) {
965 error
= VOP_LOOKUP(ZTOV(dzp
), nm
, vpp
, pnp
,
966 flags
, rdir
, cr
, ct
, direntflags
, realpnp
);
977 zfsctl_snapdir_readdir_cb(vnode_t
*vp
, void *dp
, int *eofp
,
978 offset_t
*offp
, offset_t
*nextp
, void *data
, int flags
)
980 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
981 char snapname
[ZFS_MAX_DATASET_NAME_LEN
];
983 boolean_t case_conflict
;
989 dsl_pool_config_enter(dmu_objset_pool(zfsvfs
->z_os
), FTAG
);
990 error
= dmu_snapshot_list_next(zfsvfs
->z_os
,
991 sizeof (snapname
), snapname
, &id
, &cookie
, &case_conflict
);
992 dsl_pool_config_exit(dmu_objset_pool(zfsvfs
->z_os
), FTAG
);
995 if (error
== ENOENT
) {
1002 if (flags
& V_RDDIR_ENTFLAGS
) {
1003 edirent_t
*eodp
= dp
;
1005 (void) strcpy(eodp
->ed_name
, snapname
);
1006 eodp
->ed_ino
= ZFSCTL_INO_SNAP(id
);
1007 eodp
->ed_eflags
= case_conflict
? ED_CASE_CONFLICT
: 0;
1009 struct dirent64
*odp
= dp
;
1011 (void) strcpy(odp
->d_name
, snapname
);
1012 odp
->d_ino
= ZFSCTL_INO_SNAP(id
);
1023 zfsctl_shares_readdir(vnode_t
*vp
, uio_t
*uiop
, cred_t
*cr
, int *eofp
,
1024 caller_context_t
*ct
, int flags
)
1026 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
1032 if (zfsvfs
->z_shares_dir
== 0) {
1034 return (SET_ERROR(ENOTSUP
));
1036 if ((error
= zfs_zget(zfsvfs
, zfsvfs
->z_shares_dir
, &dzp
)) == 0) {
1037 error
= VOP_READDIR(ZTOV(dzp
), uiop
, cr
, eofp
, ct
, flags
);
1041 error
= SET_ERROR(ENOENT
);
1049 * pvp is the '.zfs' directory (zfsctl_node_t).
1051 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
1053 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
1054 * when a lookup is performed on .zfs for "snapshot".
1057 zfsctl_mknode_snapdir(vnode_t
*pvp
)
1060 zfsctl_snapdir_t
*sdp
;
1062 vp
= gfs_dir_create(sizeof (zfsctl_snapdir_t
), pvp
,
1063 zfsctl_ops_snapdir
, NULL
, NULL
, MAXNAMELEN
,
1064 zfsctl_snapdir_readdir_cb
, NULL
);
1066 sdp
->sd_node
.zc_id
= ZFSCTL_INO_SNAPDIR
;
1067 sdp
->sd_node
.zc_cmtime
= ((zfsctl_node_t
*)pvp
->v_data
)->zc_cmtime
;
1068 mutex_init(&sdp
->sd_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1069 avl_create(&sdp
->sd_snaps
, snapentry_compare
,
1070 sizeof (zfs_snapentry_t
), offsetof(zfs_snapentry_t
, se_node
));
1075 zfsctl_mknode_shares(vnode_t
*pvp
)
1080 vp
= gfs_dir_create(sizeof (zfsctl_node_t
), pvp
,
1081 zfsctl_ops_shares
, NULL
, NULL
, MAXNAMELEN
,
1084 sdp
->zc_cmtime
= ((zfsctl_node_t
*)pvp
->v_data
)->zc_cmtime
;
1091 zfsctl_shares_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
1092 caller_context_t
*ct
)
1094 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
1099 if (zfsvfs
->z_shares_dir
== 0) {
1101 return (SET_ERROR(ENOTSUP
));
1103 if ((error
= zfs_zget(zfsvfs
, zfsvfs
->z_shares_dir
, &dzp
)) == 0) {
1104 error
= VOP_GETATTR(ZTOV(dzp
), vap
, flags
, cr
, ct
);
1115 zfsctl_snapdir_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
1116 caller_context_t
*ct
)
1118 zfsvfs_t
*zfsvfs
= vp
->v_vfsp
->vfs_data
;
1119 zfsctl_snapdir_t
*sdp
= vp
->v_data
;
1122 zfsctl_common_getattr(vp
, vap
);
1123 vap
->va_nodeid
= gfs_file_inode(vp
);
1124 vap
->va_nlink
= vap
->va_size
= avl_numnodes(&sdp
->sd_snaps
) + 2;
1125 vap
->va_ctime
= vap
->va_mtime
= dmu_objset_snap_cmtime(zfsvfs
->z_os
);
1133 zfsctl_snapdir_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*ct
)
1135 zfsctl_snapdir_t
*sdp
= vp
->v_data
;
1138 private = gfs_dir_inactive(vp
);
1139 if (private != NULL
) {
1140 ASSERT(avl_numnodes(&sdp
->sd_snaps
) == 0);
1141 mutex_destroy(&sdp
->sd_lock
);
1142 avl_destroy(&sdp
->sd_snaps
);
1143 kmem_free(private, sizeof (zfsctl_snapdir_t
));
1147 static const fs_operation_def_t zfsctl_tops_snapdir
[] = {
1148 { VOPNAME_OPEN
, { .vop_open
= zfsctl_common_open
} },
1149 { VOPNAME_CLOSE
, { .vop_close
= zfsctl_common_close
} },
1150 { VOPNAME_IOCTL
, { .error
= fs_inval
} },
1151 { VOPNAME_GETATTR
, { .vop_getattr
= zfsctl_snapdir_getattr
} },
1152 { VOPNAME_ACCESS
, { .vop_access
= zfsctl_common_access
} },
1153 { VOPNAME_RENAME
, { .vop_rename
= zfsctl_snapdir_rename
} },
1154 { VOPNAME_RMDIR
, { .vop_rmdir
= zfsctl_snapdir_remove
} },
1155 { VOPNAME_MKDIR
, { .vop_mkdir
= zfsctl_snapdir_mkdir
} },
1156 { VOPNAME_READDIR
, { .vop_readdir
= gfs_vop_readdir
} },
1157 { VOPNAME_LOOKUP
, { .vop_lookup
= zfsctl_snapdir_lookup
} },
1158 { VOPNAME_SEEK
, { .vop_seek
= fs_seek
} },
1159 { VOPNAME_INACTIVE
, { .vop_inactive
= zfsctl_snapdir_inactive
} },
1160 { VOPNAME_FID
, { .vop_fid
= zfsctl_common_fid
} },
1164 static const fs_operation_def_t zfsctl_tops_shares
[] = {
1165 { VOPNAME_OPEN
, { .vop_open
= zfsctl_common_open
} },
1166 { VOPNAME_CLOSE
, { .vop_close
= zfsctl_common_close
} },
1167 { VOPNAME_IOCTL
, { .error
= fs_inval
} },
1168 { VOPNAME_GETATTR
, { .vop_getattr
= zfsctl_shares_getattr
} },
1169 { VOPNAME_ACCESS
, { .vop_access
= zfsctl_common_access
} },
1170 { VOPNAME_READDIR
, { .vop_readdir
= zfsctl_shares_readdir
} },
1171 { VOPNAME_LOOKUP
, { .vop_lookup
= zfsctl_shares_lookup
} },
1172 { VOPNAME_SEEK
, { .vop_seek
= fs_seek
} },
1173 { VOPNAME_INACTIVE
, { .vop_inactive
= gfs_vop_inactive
} },
1174 { VOPNAME_FID
, { .vop_fid
= zfsctl_shares_fid
} },
1179 * pvp is the GFS vnode '.zfs/snapshot'.
1181 * This creates a GFS node under '.zfs/snapshot' representing each
1182 * snapshot. This newly created GFS node is what we mount snapshot
1186 zfsctl_snapshot_mknode(vnode_t
*pvp
, uint64_t objset
)
1191 vp
= gfs_dir_create(sizeof (zfsctl_node_t
), pvp
,
1192 zfsctl_ops_snapshot
, NULL
, NULL
, MAXNAMELEN
, NULL
, NULL
);
1194 zcp
->zc_id
= objset
;
1200 zfsctl_snapshot_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*ct
)
1202 zfsctl_snapdir_t
*sdp
;
1203 zfs_snapentry_t
*sep
, *next
;
1206 VERIFY(gfs_dir_lookup(vp
, "..", &dvp
, cr
, 0, NULL
, NULL
) == 0);
1209 mutex_enter(&sdp
->sd_lock
);
1211 mutex_enter(&vp
->v_lock
);
1212 if (vp
->v_count
> 1) {
1214 mutex_exit(&vp
->v_lock
);
1215 mutex_exit(&sdp
->sd_lock
);
1219 mutex_exit(&vp
->v_lock
);
1220 ASSERT(!vn_ismntpt(vp
));
1222 sep
= avl_first(&sdp
->sd_snaps
);
1223 while (sep
!= NULL
) {
1224 next
= AVL_NEXT(&sdp
->sd_snaps
, sep
);
1226 if (sep
->se_root
== vp
) {
1227 avl_remove(&sdp
->sd_snaps
, sep
);
1228 kmem_free(sep
->se_name
, strlen(sep
->se_name
) + 1);
1229 kmem_free(sep
, sizeof (zfs_snapentry_t
));
1234 ASSERT(sep
!= NULL
);
1236 mutex_exit(&sdp
->sd_lock
);
1240 * Dispose of the vnode for the snapshot mount point.
1241 * This is safe to do because once this entry has been removed
1242 * from the AVL tree, it can't be found again, so cannot become
1243 * "active". If we lookup the same name again we will end up
1244 * creating a new vnode.
1246 gfs_vop_inactive(vp
, cr
, ct
);
1251 * These VP's should never see the light of day. They should always
1254 static const fs_operation_def_t zfsctl_tops_snapshot
[] = {
1255 VOPNAME_INACTIVE
, { .vop_inactive
= zfsctl_snapshot_inactive
},
1260 zfsctl_lookup_objset(vfs_t
*vfsp
, uint64_t objsetid
, zfsvfs_t
**zfsvfsp
)
1262 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1264 zfsctl_snapdir_t
*sdp
;
1266 zfs_snapentry_t
*sep
;
1269 ASSERT(zfsvfs
->z_ctldir
!= NULL
);
1270 error
= zfsctl_root_lookup(zfsvfs
->z_ctldir
, "snapshot", &dvp
,
1271 NULL
, 0, NULL
, kcred
, NULL
, NULL
, NULL
);
1276 mutex_enter(&sdp
->sd_lock
);
1277 sep
= avl_first(&sdp
->sd_snaps
);
1278 while (sep
!= NULL
) {
1281 if (zcp
->zc_id
== objsetid
)
1284 sep
= AVL_NEXT(&sdp
->sd_snaps
, sep
);
1290 * Return the mounted root rather than the covered mount point.
1291 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
1292 * and returns the ZFS vnode mounted on top of the GFS node.
1293 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
1295 error
= traverse(&vp
);
1297 if (vp
== sep
->se_root
)
1298 error
= SET_ERROR(EINVAL
);
1300 *zfsvfsp
= VTOZ(vp
)->z_zfsvfs
;
1302 mutex_exit(&sdp
->sd_lock
);
1305 error
= SET_ERROR(EINVAL
);
1306 mutex_exit(&sdp
->sd_lock
);
1315 * Unmount any snapshots for the given filesystem. This is called from
1316 * zfs_umount() - if we have a ctldir, then go through and unmount all the
1320 zfsctl_umount_snapshots(vfs_t
*vfsp
, int fflags
, cred_t
*cr
)
1322 zfsvfs_t
*zfsvfs
= vfsp
->vfs_data
;
1324 zfsctl_snapdir_t
*sdp
;
1325 zfs_snapentry_t
*sep
, *next
;
1328 ASSERT(zfsvfs
->z_ctldir
!= NULL
);
1329 error
= zfsctl_root_lookup(zfsvfs
->z_ctldir
, "snapshot", &dvp
,
1330 NULL
, 0, NULL
, cr
, NULL
, NULL
, NULL
);
1335 mutex_enter(&sdp
->sd_lock
);
1337 sep
= avl_first(&sdp
->sd_snaps
);
1338 while (sep
!= NULL
) {
1339 next
= AVL_NEXT(&sdp
->sd_snaps
, sep
);
1342 * If this snapshot is not mounted, then it must
1343 * have just been unmounted by somebody else, and
1344 * will be cleaned up by zfsctl_snapdir_inactive().
1346 if (vn_ismntpt(sep
->se_root
)) {
1347 avl_remove(&sdp
->sd_snaps
, sep
);
1348 error
= zfsctl_unmount_snap(sep
, fflags
, cr
);
1350 avl_add(&sdp
->sd_snaps
, sep
);
1357 mutex_exit(&sdp
->sd_lock
);