/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.127 2008/05/18 05:54:25 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/dirent.h>
#include <sys/extattr.h>
#include <sys/spinlock.h>
#include <sys/kern_syscall.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <machine/limits.h>
#include <machine/stdarg.h>

#include <vfs/union/union.h>
static void mount_warning(struct mount *mp, const char *ctl, ...);
static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb);
static int checkvp_chdir (struct vnode *vn, struct thread *td);
static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch);
static int chroot_refuse_vdir_fds (struct filedesc *fdp);
static int chroot_visible_mnt(struct mount *mp, struct proc *p);
static int getutimes (const struct timeval *, struct timespec *);
static int setfown (struct vnode *, uid_t, gid_t);
static int setfmode (struct vnode *, int);
static int setfflags (struct vnode *, int);
static int setutimes (struct vnode *, const struct timespec *, int);
static int usermount = 0;	/* if 1, non-root can mount fs. */

int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);

SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
/*
 * Virtual File System System Calls
 */

/*
 * Mount a file system.
 *
 * mount_args(char *type, char *path, int flags, caddr_t data)
 */
sys_mount(struct mount_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vfsconf *vfsp;
	int error, flag = 0, flag2 = 0;
	struct nlookupdata nd;
	char fstypename[MFSNAMELEN];
	struct ucred *cred = p->p_ucred;

	if (cred->cr_prison != NULL)
	if (usermount == 0 && (error = suser(td)))

	/*
	 * Do not allow NFS export by non-root users.
	 */
	if (uap->flags & MNT_EXPORTED) {

	/*
	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
	 */
	uap->flags |= MNT_NOSUID | MNT_NODEV;

	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if ((error = nlookup(&nd)) == 0) {
		if (nd.nl_nch.ncp->nc_vp == NULL)

	/*
	 * Extract the locked+refd ncp and cleanup the nd structure
	 */
	cache_zero(&nd.nl_nch);

	if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && cache_findmount(&nch))

	/*
	 * now we have the locked ref'd nch and unreferenced vnode.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {

	/*
	 * Now we have an unlocked ref'd nch and a locked ref'd vp
	 */
	if (uap->flags & MNT_UPDATE) {
		if ((vp->v_flag & VROOT) == 0) {
		flag2 = mp->mnt_kern_flag;
		/*
		 * We only allow the filesystem to be reloaded if it
		 * is currently mounted read-only.
		 */
		if ((uap->flags & MNT_RELOAD) &&
		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
			return (EOPNOTSUPP);	/* Needs translation */
		/*
		 * Only root, or the user that did the original mount is
		 * permitted to update it.
		 */
		if (mp->mnt_stat.f_owner != cred->cr_uid &&
		    (error = suser(td))) {
		if (vfs_busy(mp, LK_NOWAIT)) {
		if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
		vp->v_flag |= VMOUNT;
		    uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);

	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	if ((error = VOP_GETATTR(vp, &va)) ||
	    (va.va_uid != cred->cr_uid && (error = suser(td)))) {
	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
	if (vp->v_type != VDIR) {
	if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) {
	if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
		/* Only load modules for root (very important!) */
		if ((error = suser(td)) != 0) {
		error = linker_load_file(fstypename, &lf);
		if (error || lf == NULL) {
		/* lookup again, see if the VFS was loaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
			if (!strcmp(vfsp->vfc_name, fstypename))
			linker_file_unload(lf);
	if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
	vp->v_flag |= VMOUNT;

	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	mp->mnt_iosize_max = DFLTPHYS;

	/*
	 * Set the mount level flags.
	 */
	if (uap->flags & MNT_RDONLY)
		mp->mnt_flag |= MNT_RDONLY;
	else if (mp->mnt_flag & MNT_RDONLY)
		mp->mnt_kern_flag |= MNTK_WANTRDWR;
	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
	    MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
	mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
	    MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 */
	error = VFS_MOUNT(mp, uap->path, uap->data, cred);
	if (mp->mnt_flag & MNT_UPDATE) {
		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
			mp->mnt_flag &= ~MNT_RDONLY;
		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
		mp->mnt_kern_flag = flag2;
		vp->v_flag &= ~VMOUNT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 */
	if (mp->mnt_ncmountpt.ncp == NULL) {
		/*
		 * allocate, then unlock, but leave the ref intact
		 */
		cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
		cache_unlock(&mp->mnt_ncmountpt);
	mp->mnt_ncmounton = nch;		/* inherits ref */
	nch.ncp->nc_flag |= NCF_ISMOUNTPT;

	/* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
	vp->v_flag &= ~VMOUNT;
	mountlist_insert(mp, MNTINS_LAST);
	checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt);
	error = vfs_allocate_syncvnode(mp);
	error = VFS_START(mp, 0);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
	vp->v_flag &= ~VMOUNT;
	mp->mnt_vfc->vfc_refcount--;
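
/*
 * Illustrative userland sketch (not part of this kernel source): the
 * sys_mount() path above is reached by a mount(2) call of the form shown
 * in the mount_args comment.  The filesystem type, device path and mount
 * point below are hypothetical, and struct ufs_args is assumed to come
 * from the UFS headers; VFS_MOUNT() copies the per-VFS data in itself.
 *
 *	struct ufs_args args;
 *	bzero(&args, sizeof(args));
 *	args.fspec = "/dev/ad0s1a";
 *	if (mount("ufs", "/mnt", MNT_RDONLY, &args) < 0)
 *		err(1, "mount");
 */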
/*
 * Scan all active processes to see if any of them have a current
 * or root directory onto which the new filesystem has just been
 * mounted. If so, replace them with the new mount point.
 *
 * The passed ncp is ref'd and locked (from the mount code) and
 * must be associated with the vnode representing the root of the
 */
struct checkdirs_info {
	struct nchandle old_nch;
	struct nchandle new_nch;
	struct vnode *old_vp;
	struct vnode *new_vp;

static int checkdirs_callback(struct proc *p, void *data);

checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
	struct checkdirs_info info;

	/*
	 * If the old mount point's vnode has a usecount of 1, it is not
	 * being held as a descriptor anywhere.
	 */
	olddp = old_nch->ncp->nc_vp;
	if (olddp == NULL || olddp->v_sysref.refcnt == 1)

	/*
	 * Force the root vnode of the new mount point to be resolved
	 * so we can update any matching processes.
	 */
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	cache_setunresolved(new_nch);
	cache_setvp(new_nch, newdp);

	/*
	 * Special handling of the root node
	 */
	if (rootvnode == olddp) {
		vfs_cache_setroot(newdp, cache_hold(new_nch));

	/*
	 * Pass newdp separately so the callback does not have to access
	 * it via new_nch->ncp->nc_vp.
	 */
	info.old_nch = *old_nch;
	info.new_nch = *new_nch;
	allproc_scan(checkdirs_callback, &info);

/*
 * NOTE: callback is not MP safe because the scanned process's filedesc
 * structure can be ripped out from under us, among other things.
 */
checkdirs_callback(struct proc *p, void *data)
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock_wr(&fdp->fd_spin);
		if (fdp->fd_ncdir.mount == info->old_nch.mount &&
		    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
			vprele1 = fdp->fd_cdir;
			fdp->fd_cdir = info->new_vp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->new_nch, &fdp->fd_ncdir);
		if (fdp->fd_nrdir.mount == info->old_nch.mount &&
		    fdp->fd_nrdir.ncp == info->old_nch.ncp) {
			vprele2 = fdp->fd_rdir;
			fdp->fd_rdir = info->new_vp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->new_nch, &fdp->fd_nrdir);
		spin_unlock_wr(&fdp->fd_spin);
		cache_drop(&ncdrop1);
		cache_drop(&ncdrop2);
/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not special file (as before).
 *
 * umount_args(char *path, int flags)
 */
sys_unmount(struct unmount_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp = NULL;
	struct nlookupdata nd;

	if (p->p_ucred->cr_prison != NULL)
	if (usermount == 0 && (error = suser(td)))

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);

	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {

	return (dounmount(mp, uap->flags));
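
/*
 * Illustrative userland sketch (not part of this kernel source): as the
 * comment above notes, unmount(2) takes the path of the mount point
 * itself rather than the special file.  The path is hypothetical.
 *
 *	if (unmount("/mnt", MNT_FORCE) < 0)
 *		err(1, "unmount");
 */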
/*
 * Do the actual file system unmount.
 */
dounmount_interlock(struct mount *mp)
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
	mp->mnt_kern_flag |= MNTK_UNMOUNT;

dounmount(struct mount *mp, int flags)
	struct namecache *ncp;

	/*
	 * Exclusive access for unmounting purposes
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)

	/*
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
	error = lockmgr(&mp->mnt_lock, lflags);
	mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
	if (mp->mnt_kern_flag & MNTK_MWAIT)

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;

	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwards.
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			if ((flags & MNT_FORCE) == 0) {
				mount_warning(mp, "Cannot unmount: "
				mount_warning(mp, "Forced unmount: "

	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 */
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);
			mount_warning(mp, "Forced unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);

	/*
	 * Decommission our special mnt_syncer vnode.  This also stops
	 * the vnlru code.  If we are unable to unmount we recommission
	 */
	if ((vp = mp->mnt_syncer) != NULL) {
		mp->mnt_syncer = NULL;

	if (((mp->mnt_flag & MNT_RDONLY) ||
	     (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
	    (flags & MNT_FORCE)) {
		error = VFS_UNMOUNT(mp, flags);

	if (mp->mnt_syncer == NULL)
		vfs_allocate_syncvnode(mp);
	mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
	mp->mnt_flag |= async_flag;
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT)

	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 */
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);

	if (mp->mnt_ncmountpt.ncp != NULL) {
		nch = mp->mnt_ncmountpt;
		cache_zero(&mp->mnt_ncmountpt);
		cache_clrmountpt(&nch);
	if (mp->mnt_ncmounton.ncp != NULL) {
		nch = mp->mnt_ncmounton;
		cache_zero(&mp->mnt_ncmounton);
		cache_clrmountpt(&nch);

	mp->mnt_vfc->vfc_refcount--;
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
		panic("unmount: dangling vnode");
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT)
mount_warning(struct mount *mp, const char *ctl, ...)
	if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf) == 0) {
		kprintf("unmount(%s): ", ptr);
		kprintf("unmount(%p", mp);
		if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name)
			kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name);
/*
 * Shim cache_fullpath() to handle the case where a process is chrooted into
 * a subdirectory of a mount.  In this case if the root mount matches the
 * process root directory's mount we have to specify the process's root
 * directory instead of the mount point, because the mount point might
 * be above the root directory.
 */
mount_path(struct proc *p, struct mount *mp, char **rb, char **fb)
	struct nchandle *nch;

	if (p && p->p_fd->fd_nrdir.mount == mp)
		nch = &p->p_fd->fd_nrdir;
	else
		nch = &mp->mnt_ncmountpt;
	return(cache_fullpath(p, nch, rb, fb));
/*
 * Sync each mounted filesystem.
 */
static int syncprt = 0;
SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");

static int sync_callback(struct mount *mp, void *data);

sys_sync(struct sync_args *uap)
	mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);

	/*
	 * print out buffer pool stat information on each sync() call.
	 */

sync_callback(struct mount *mp, void *data __unused)
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT);
		mp->mnt_flag |= asyncflag;

/* XXX PRISON: could be per prison flag */
static int prison_quotas;
SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
/*
 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
 *
 * Change filesystem quotas.
 */
sys_quotactl(struct quotactl_args *uap)
	struct nlookupdata nd;

	if (p->p_ucred->cr_prison && !prison_quotas)

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);

	mp = nd.nl_nch.mount;
	error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
			     uap->arg, nd.nl_cred);
/*
 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
 *	    void *buf, int buflen)
 *
 * This function operates on a mount point and executes the specified
 * operation using the specified control data, and possibly returns data.
 *
 * The actual number of bytes stored in the result buffer is returned, 0
 * if none, otherwise an error is returned.
 */
sys_mountctl(struct mountctl_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * Sanity and permissions checks.  We must be root.
	 */
	if (p->p_ucred->cr_prison != NULL)
	if ((error = suser(td)) != 0)

	/*
	 * Argument length checks
	 */
	if (uap->ctllen < 0 || uap->ctllen > 1024)
	if (uap->buflen < 0 || uap->buflen > 16 * 1024)
	if (uap->path == NULL)

	/*
	 * Allocate the necessary buffers and copyin data
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);

	ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
	error = copyin(uap->ctl, ctl, uap->ctllen);

	buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);

	/*
	 * Validate the descriptor
	 */
	fp = holdfp(p->p_fd, uap->fd, -1);

	/*
	 * Execute the internal kernel function and clean up.
	 */
	error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen,
			      buf, uap->buflen, &uap->sysmsg_result);
	if (error == 0 && uap->sysmsg_result > 0)
		error = copyout(buf, uap->buf, uap->sysmsg_result);

	objcache_put(namei_oc, path);
/*
 * Execute a mount control operation by resolving the path to a mount point
 * and calling vop_mountctl().
 */
kern_mountctl(const char *path, int op, struct file *fp,
	      const void *ctl, int ctllen,
	      void *buf, int buflen, int *res)
	struct nlookupdata nd;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);

	/*
	 * Must be the root of the filesystem
	 */
	if ((vp->v_flag & VROOT) == 0) {

	error = vop_mountctl(mp->mnt_vn_use_ops, op, fp, ctl, ctllen,
kern_statfs(struct nlookupdata *nd, struct statfs *buf)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	char *fullpath, *freepath;

	if ((error = nlookup(nd)) != 0)
	mp = nd->nl_nch.mount;
	if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0)
	error = mount_path(p, mp, &fullpath, &freepath);

	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));
	/* Only root should have access to the fsid's. */
	buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
/*
 * statfs_args(char *path, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
sys_statfs(struct statfs_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_statfs(&nd, &buf);

	error = copyout(&buf, uap->buf, sizeof(*uap->buf));
kern_fstatfs(int fd, struct statfs *buf)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	char *fullpath, *freepath;

	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
	mp = ((struct vnode *)fp->f_data)->v_mount;

	if (fp->f_cred == NULL) {

	if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
	if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0)
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));

	/* Only root should have access to the fsid's. */
	buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
/*
 * fstatfs_args(int fd, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
sys_fstatfs(struct fstatfs_args *uap)
	error = kern_fstatfs(uap->fd, &buf);

	error = copyout(&buf, uap->buf, sizeof(*uap->buf));
/*
 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
 *
 * Get statistics on all filesystems.
 */
struct getfsstat_info {
	struct statfs *sfsp;

static int getfsstat_callback(struct mount *, void *);

sys_getfsstat(struct getfsstat_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct getfsstat_info info;

	bzero(&info, sizeof(info));

	info.maxcount = uap->bufsize / sizeof(struct statfs);
	info.sfsp = uap->buf;
	info.flags = uap->flags;

	mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
	if (info.sfsp && info.count > info.maxcount)
		uap->sysmsg_result = info.maxcount;
	else
		uap->sysmsg_result = info.count;
	return (info.error);

getfsstat_callback(struct mount *mp, void *data)
	struct getfsstat_info *info = data;

	if (info->sfsp && info->count < info->maxcount) {
		if (info->p && !chroot_visible_mnt(mp, info->p))

		/*
		 * If MNT_NOWAIT or MNT_LAZY is specified, do not
		 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
		 * overrides MNT_WAIT.
		 */
		if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
		     (info->flags & MNT_WAIT)) &&
		    (error = VFS_STATFS(mp, sp, info->p->p_ucred))) {

		sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;

		error = mount_path(info->p, mp, &fullpath, &freepath);
			info->error = error;
		bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
		strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
		kfree(freepath, M_TEMP);

		error = copyout(sp, info->sfsp, sizeof(*sp));
			info->error = error;
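
/*
 * Illustrative userland sketch (not part of this kernel source): the usual
 * two-pass getfsstat(2) idiom served by the code above: a NULL buffer only
 * returns the mount count (info.sfsp == NULL skips the copyout), and a
 * second call with a sized buffer fetches the statistics.
 *
 *	int n = getfsstat(NULL, 0, MNT_WAIT);
 *	struct statfs *sfs = calloc(n, sizeof(*sfs));
 *	n = getfsstat(sfs, n * sizeof(*sfs), MNT_WAIT);
 */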
/*
 * fchdir_args(int fd)
 *
 * Change current working directory to a given file descriptor.
 */
sys_fchdir(struct fchdir_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct nchandle nch, onch, tnch;

	if ((error = holdvnode(fdp, uap->fd, &fp)) != 0)
	vp = (struct vnode *)fp->f_data;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type != VDIR || fp->f_nchandle.ncp == NULL)
	error = VOP_ACCESS(vp, VEXEC, p->p_ucred);

	cache_copy(&fp->f_nchandle, &nch);

	/*
	 * If the ncp has become a mount point, traverse through
	 */
	while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) &&
	       (mp = cache_findmount(&nch)) != NULL
		error = nlookup_mp(mp, &tnch);
		cache_unlock(&tnch);	/* leave ref intact */
		vp = tnch.ncp->nc_vp;
		error = vget(vp, LK_SHARED);
		KKASSERT(error == 0);

	onch = fdp->fd_ncdir;
	vn_unlock(vp);		/* leave ref intact */
	fdp->fd_ncdir = nch;
kern_chdir(struct nlookupdata *nd)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct nchandle onch;

	if ((error = nlookup(nd)) != 0)
	if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
	if ((error = vget(vp, LK_SHARED)) != 0)

	error = checkvp_chdir(vp, td);

	onch = fdp->fd_ncdir;
	cache_unlock(&nd->nl_nch);	/* leave reference intact */
	fdp->fd_ncdir = nd->nl_nch;

	cache_zero(&nd->nl_nch);
/*
 * chdir_args(char *path)
 *
 * Change current working directory (``.'').
 */
sys_chdir(struct chdir_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chdir(&nd);
/*
 * Helper function for raised chroot(2) security function:  Refuse if
 * any filedescriptors are open directories.
 */
chroot_refuse_vdir_fds(struct filedesc *fdp)
	for (fd = 0; fd < fdp->fd_nfiles; fd++) {
		if ((error = holdvnode(fdp, fd, &fp)) != 0)
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VDIR) {

/*
 * This sysctl determines if we will allow a process to chroot(2) if it
 * has a directory open:
 *	0: disallowed for all processes.
 *	1: allowed for processes that were not already chroot(2)'ed.
 *	2: allowed for all processes.
 */
static int chroot_allow_open_directories = 1;

SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
     &chroot_allow_open_directories, 0, "");

/*
 * chroot to the specified namecache entry.  We obtain the vp from the
 * namecache data.  The passed ncp must be locked and referenced and will
 * remain locked and referenced on return.
 */
kern_chroot(struct nchandle *nch)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;

	/*
	 * Only root can chroot
	 */
	if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)

	/*
	 * Disallow open directory descriptors (fchdir() breakouts).
	 */
	if (chroot_allow_open_directories == 0 ||
	   (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
		if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
	if ((vp = nch->ncp->nc_vp) == NULL)
	if ((error = vget(vp, LK_SHARED)) != 0)

	/*
	 * Check the validity of vp as a directory to change to and
	 * associate it with rdir/jdir.
	 */
	error = checkvp_chdir(vp, td);
	vn_unlock(vp);			/* leave reference intact */
	vrele(fdp->fd_rdir);
	fdp->fd_rdir = vp;	/* reference inherited by fd_rdir */
	cache_drop(&fdp->fd_nrdir);
	cache_copy(nch, &fdp->fd_nrdir);
	if (fdp->fd_jdir == NULL) {
		cache_copy(nch, &fdp->fd_njdir);
/*
 * chroot_args(char *path)
 *
 * Change notion of root (``/'') directory.
 */
sys_chroot(struct chroot_args *uap)
	struct thread *td = curthread;
	struct nlookupdata nd;

	KKASSERT(td->td_proc);
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = kern_chroot(&nd.nl_nch);

/*
 * Common routine for chroot and chdir.  Given a locked, referenced vnode,
 * determine whether it is legal to chdir to the vnode.  The vnode's state
 * is not changed by this call.
 */
checkvp_chdir(struct vnode *vp, struct thread *td)
	if (vp->v_type != VDIR)
	error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred);
kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct filedesc *fdp = p->p_fd;
	int type, indx, error;

	if ((oflags & O_ACCMODE) == O_ACCMODE)
	flags = FFLAGS(oflags);
	error = falloc(p, &nfp, NULL);
	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;

	/*
	 * XXX p_dupfd is a real mess.  It allows a device to return a
	 * file descriptor to be duplicated rather than doing the open
	 */

	/*
	 * Call vn_open() to do the lookup and assign the vnode to the
	 * file pointer.  vn_open() does not change the ref count on fp
	 * and the vnode, on success, will be inherited by the file pointer
	 */
	nd->nl_flags |= NLC_LOCKVP;
	error = vn_open(nd, fp, flags, cmode);

	/*
	 * handle special fdopen() case.  bleh.  dupfdopen() is
	 * responsible for dropping the old contents of ofiles[indx]
	 *
	 * Note that fsetfd() will add a ref to fp which represents
	 * the fd_files[] assignment.  We must still drop our
	 */
	if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
		if (fdalloc(p, 0, &indx) == 0) {
			error = dupfdopen(p, indx, lp->lwp_dupfd, flags, error);
				fdrop(fp);	/* our ref */
			fsetfd(p, NULL, indx);
	fdrop(fp);	/* our ref */
	if (error == ERESTART)

	/*
	 * ref the vnode for ourselves so it can't be ripped out from under
	 * us.  XXX need an ND flag to request that the vnode be returned
	 *
	 * Reserve a file descriptor but do not assign it until the open
	 */
	vp = (struct vnode *)fp->f_data;
	if ((error = fdalloc(p, 0, &indx)) != 0) {

	/*
	 * If no error occurs the vp will have been assigned to the file
	 */
	if (flags & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (flags & FNONBLOCK)
		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * lock request failed.  Clean up the reserved
			 */
			fsetfd(p, NULL, indx);
		fp->f_flag |= FHASLOCK;

	/*
	 * Assert that all regular file vnodes were created with an object.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
	    ("open: regular file has no backing object after vn_open"));

	/*
	 * release our private reference, leaving the one associated with the
	 * descriptor table intact.
	 */
	fsetfd(p, fp, indx);
/*
 * open_args(char *path, int flags, int mode)
 *
 * Check permissions, allocate an open file structure,
 * and call the device open routine if any.
 */
sys_open(struct open_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_open(&nd, uap->flags,
			  uap->mode, &uap->sysmsg_result);
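
/*
 * Illustrative userland sketch (not part of this kernel source): open(2)
 * with O_EXLOCK, which kern_open() above translates into a VOP_ADVLOCK()
 * F_SETLK request before the descriptor is installed; O_NONBLOCK maps to
 * the FNONBLOCK check.  The path is hypothetical.
 *
 *	int fd = open("/var/run/sample.lock",
 *	    O_RDWR | O_CREAT | O_EXLOCK | O_NONBLOCK, 0644);
 *	if (fd < 0)
 *		err(1, "open");
 */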
kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	switch (mode & S_IFMT) {
		error = suser_cred(p->p_ucred, PRISON_ROOT);

	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	vattr.va_rmajor = rmajor;
	vattr.va_rminor = rminor;

	switch (mode & S_IFMT) {
	case S_IFMT:	/* used by badsect to flag bad sectors */
		vattr.va_type = VBAD;
		vattr.va_type = VCHR;
		vattr.va_type = VBLK;

	error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp,
			      nd->nl_cred, NAMEI_CREATE);
	error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp,
			   &vp, nd->nl_cred, &vattr);
/*
 * mknod_args(char *path, int mode, int dev)
 *
 * Create a special file.
 */
sys_mknod(struct mknod_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_mknod(&nd, uap->mode,
			   umajor(uap->dev), uminor(uap->dev));
kern_mkfifo(struct nlookupdata *nd, int mode)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	vattr.va_type = VFIFO;
	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr);
/*
 * mkfifo_args(char *path, int mode)
 *
 * Create a named pipe.
 */
sys_mkfifo(struct mkfifo_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_mkfifo(&nd, uap->mode);

static int hardlink_check_uid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
    &hardlink_check_uid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
static int hardlink_check_gid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
    &hardlink_check_gid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "

can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred)
	/*
	 * Shortcut if disabled
	 */
	if (hardlink_check_uid == 0 && hardlink_check_gid == 0)

	/*
	 * root cred can always hardlink
	 */
	if (suser_cred(cred, PRISON_ROOT) == 0)

	/*
	 * Otherwise only if the originating file is owned by the
	 * same user or group.  Note that any group is allowed if
	 * the file is owned by the caller.
	 */
	error = VOP_GETATTR(vp, &va);

	if (hardlink_check_uid) {
		if (cred->cr_uid != va.va_uid)
	if (hardlink_check_gid) {
		if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred))
kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
	struct thread *td = curthread;

	/*
	 * Lookup the source and obtain a locked vnode.
	 *
	 * XXX relookup on vget failure / race ?
	 */
	if ((error = nlookup(nd)) != 0)
	vp = nd->nl_nch.ncp->nc_vp;
	KKASSERT(vp != NULL);
	if (vp->v_type == VDIR)
		return (EPERM);		/* POSIX */
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0)

	/*
	 * Unlock the source so we can lookup the target without deadlocking
	 * (XXX vp is locked already, possible other deadlock?).  The target
	 */
	KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
	nd->nl_flags &= ~NLC_NCPISLOCKED;
	cache_unlock(&nd->nl_nch);

	linknd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(linknd)) != 0) {
	if (linknd->nl_nch.ncp->nc_vp) {

	/*
	 * Finally run the new API VOP.
	 */
	error = can_hardlink(vp, td, td->td_proc->p_ucred);
	error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp,
			  vp, linknd->nl_cred);
/*
 * link_args(char *path, char *link)
 *
 * Make a hard file link.
 */
sys_link(struct link_args *uap)
	struct nlookupdata nd, linknd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
	error = kern_link(&nd, &linknd);
	nlookup_done(&linknd);
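
/*
 * Illustrative userland sketch (not part of this kernel source): link(2)
 * as checked by can_hardlink() above.  With security.hardlink_check_uid
 * set, an unprivileged caller may only hard-link files it owns.  Both
 * paths are hypothetical.
 *
 *	if (link("/tmp/original", "/tmp/alias") < 0)
 *		err(1, "link");
 */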
kern_symlink(struct nlookupdata *nd, char *path, int mode)
	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	vattr.va_mode = mode;
	error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path);
/*
 * symlink(char *path, char *link)
 *
 * Make a symbolic link.
 */
sys_symlink(struct symlink_args *uap)
	struct thread *td = curthread;
	struct nlookupdata nd;

	path = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);

	error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
	mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
	error = kern_symlink(&nd, path, mode);

	objcache_put(namei_oc, path);
/*
 * undelete_args(char *path)
 *
 * Delete a whiteout from the filesystem.
 */
sys_undelete(struct undelete_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	nd.nl_flags |= NLC_DELETE | NLC_REFDVP;
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred,

kern_unlink(struct nlookupdata *nd)
	nd->nl_flags |= NLC_DELETE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred);

/*
 * unlink_args(char *path)
 *
 * Delete a name from the filesystem.
 */
sys_unlink(struct unlink_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_unlink(&nd);
kern_lseek(int fd, off_t offset, int whence, off_t *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	fp = holdfp(p->p_fd, fd, -1);
	if (fp->f_type != DTYPE_VNODE) {

		fp->f_offset += offset;

		error = VOP_GETATTR((struct vnode *)fp->f_data, &vattr);
		fp->f_offset = offset + vattr.va_size;

		fp->f_offset = offset;

	*res = fp->f_offset;

/*
 * lseek_args(int fd, int pad, off_t offset, int whence)
 *
 * Reposition read/write file offset.
 */
sys_lseek(struct lseek_args *uap)
	error = kern_lseek(uap->fd, uap->offset, uap->whence,
			   &uap->sysmsg_offset);
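
/*
 * Illustrative userland sketch (not part of this kernel source): the
 * SEEK_END case above adds the offset to the size returned by
 * VOP_GETATTR(), so the current file length can be obtained with:
 *
 *	off_t len = lseek(fd, 0, SEEK_END);
 *	if (len == -1)
 *		err(1, "lseek");
 */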
kern_access(struct nlookupdata *nd, int aflags)
	if ((error = nlookup(nd)) != 0)
	error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);

	/* Flags == 0 means only check for existence. */
	if ((flags & VWRITE) == 0 ||
	    (error = vn_writechk(vp, &nd->nl_nch)) == 0)
		error = VOP_ACCESS(vp, flags, nd->nl_cred);

	/*
	 * If the file handle is stale we have to re-resolve the
	 * entry.  This is a hack at the moment.
	 */
	if (error == ESTALE) {
		cache_setunresolved(&nd->nl_nch);
		error = cache_resolve(&nd->nl_nch, nd->nl_cred);

/*
 * access_args(char *path, int flags)
 *
 * Check access permissions.
 */
sys_access(struct access_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_access(&nd, uap->flags);
kern_stat(struct nlookupdata *nd, struct stat *st)
	if ((error = nlookup(nd)) != 0)
	if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
	if ((error = vget(vp, LK_SHARED)) != 0)
	error = vn_stat(vp, st, nd->nl_cred);

	/*
	 * If the file handle is stale we have to re-resolve the entry.  This
	 * is a hack at the moment.
	 */
	if (error == ESTALE) {
		cache_setunresolved(&nd->nl_nch);
		error = cache_resolve(&nd->nl_nch, nd->nl_cred);

/*
 * stat_args(char *path, struct stat *ub)
 *
 * Get file status; this version follows links.
 */
sys_stat(struct stat_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_stat(&nd, &st);
	error = copyout(&st, uap->ub, sizeof(*uap->ub));

/*
 * lstat_args(char *path, struct stat *ub)
 *
 * Get file status; this version does not follow links.
 */
sys_lstat(struct lstat_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_stat(&nd, &st);
	error = copyout(&st, uap->ub, sizeof(*uap->ub));
/*
 * pathconf_args(char *path, int name)
 *
 * Get configurable pathname variables.
 */
sys_pathconf(struct pathconf_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
/*
 * kern_readlink isn't properly split yet.  There is a copyin buried
 * in VOP_READLINK().
 */
kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = nlookup(nd)) != 0)
	error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);
	if (vp->v_type != VLNK) {

	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_resid = count;
	error = VOP_READLINK(vp, &auio, p->p_ucred);

	*res = count - auio.uio_resid;

/*
 * readlink_args(char *path, char *buf, int count)
 *
 * Return target name of a symbolic link.
 */
sys_readlink(struct readlink_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_readlink(&nd, uap->buf, uap->count,
			      &uap->sysmsg_result);
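
/*
 * Illustrative userland sketch (not part of this kernel source):
 * readlink(2) returns the number of bytes placed in the buffer (count
 * minus the residual uio_resid above) and does not NUL-terminate the
 * result, so callers terminate it themselves.  The path is hypothetical.
 *
 *	char target[MAXPATHLEN];
 *	ssize_t n = readlink("/tmp/symlink", target, sizeof(target) - 1);
 *	if (n >= 0)
 *		target[n] = '\0';
 */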
setfflags(struct vnode *vp, int flags)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * Prevent non-root users from setting flags on devices.  When
	 * a device is reused, users can retain ownership of the device
	 * if they are allowed to set flags and programs assume that
	 * chown can't fail when done as root.
	 */
	if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
	    ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		vattr.va_flags = flags;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred);

/*
 * chflags(char *path, int flags)
 *
 * Change flags of a file given a path name.
 */
sys_chflags(struct chflags_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	/* XXX Add NLC flag indicating modifying operation? */
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	error = setfflags(vp, uap->flags);

/*
 * fchflags_args(int fd, int flags)
 *
 * Change flags of a file given a file descriptor.
 */
sys_fchflags(struct fchflags_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfflags((struct vnode *) fp->f_data, uap->flags);
setfmode(struct vnode *vp, int mode)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		vattr.va_mode = mode & ALLPERMS;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred);

kern_chmod(struct nlookupdata *nd, int mode)
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) == 0)
		error = setfmode(vp, mode);

/*
 * chmod_args(char *path, int mode)
 *
 * Change mode of a file given path name.
 */
sys_chmod(struct chmod_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chmod(&nd, uap->mode);

/*
 * lchmod_args(char *path, int mode)
 *
 * Change mode of a file given path name (don't follow links.)
 */
sys_lchmod(struct lchmod_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_chmod(&nd, uap->mode);

/*
 * fchmod_args(int fd, int mode)
 *
 * Change mode of a file given a file descriptor.
 */
sys_fchmod(struct fchmod_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfmode((struct vnode *)fp->f_data, uap->mode);
setfown(struct vnode *vp, uid_t uid, gid_t gid)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		error = VOP_SETATTR(vp, &vattr, p->p_ucred);

kern_chown(struct nlookupdata *nd, int uid, int gid)
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) == 0)
		error = setfown(vp, uid, gid);

/*
 * chown(char *path, int uid, int gid)
 *
 * Set ownership given a path name.
 */
sys_chown(struct chown_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chown(&nd, uap->uid, uap->gid);

/*
 * lchown_args(char *path, int uid, int gid)
 *
 * Set ownership given a path name, do not cross symlinks.
 */
sys_lchown(struct lchown_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_chown(&nd, uap->uid, uap->gid);

/*
 * fchown_args(int fd, int uid, int gid)
 *
 * Set ownership given a file descriptor.
 */
sys_fchown(struct fchown_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfown((struct vnode *)fp->f_data, uap->uid, uap->gid);
getutimes(const struct timeval *tvp, struct timespec *tsp)
	struct timeval tv[2];

		TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);

		TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
		TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);

setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		vattr.va_atime = ts[0];
		vattr.va_mtime = ts[1];
			vattr.va_vaflags |= VA_UTIMES_NULL;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred);

kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
	struct timespec ts[2];

	if ((error = getutimes(tptr, ts)) != 0)
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
	error = setutimes(vp, ts, tptr == NULL);

/*
 * utimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
sys_utimes(struct utimes_args *uap)
	struct timeval tv[2];
	struct nlookupdata nd;

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_utimes(&nd, uap->tptr ? tv : NULL);

/*
 * lutimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
sys_lutimes(struct lutimes_args *uap)
	struct timeval tv[2];
	struct nlookupdata nd;

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_utimes(&nd, uap->tptr ? tv : NULL);

kern_futimes(int fd, struct timeval *tptr)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts[2];

	error = getutimes(tptr, ts);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);

/*
 * futimes_args(int fd, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
sys_futimes(struct futimes_args *uap)
	struct timeval tv[2];

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
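
/*
 * Illustrative userland sketch (not part of this kernel source): passing a
 * NULL timeval pointer to utimes(2) sets both timestamps to the current
 * time, which getutimes()/setutimes() above implement via VA_UTIMES_NULL.
 * The path is hypothetical.
 *
 *	if (utimes("/tmp/somefile", NULL) < 0)
 *		err(1, "utimes");
 */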
kern_truncate(struct nlookupdata *nd, off_t length)
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) {
	if (vp->v_type == VDIR) {
	} else if ((error = vn_writechk(vp, &nd->nl_nch)) == 0 &&
		   (error = VOP_ACCESS(vp, VWRITE, nd->nl_cred)) == 0) {
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, nd->nl_cred);

/*
 * truncate(char *path, int pad, off_t length)
 *
 * Truncate a file given its path name.
 */
sys_truncate(struct truncate_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_truncate(&nd, uap->length);

kern_ftruncate(int fd, off_t length)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
	if (fp->f_nchandle.ncp) {
		error = ncp_writechk(&fp->f_nchandle);
	if ((fp->f_flag & FWRITE) == 0) {
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
	} else if ((error = vn_writechk(vp, NULL)) == 0) {
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);

/*
 * ftruncate_args(int fd, int pad, off_t length)
 *
 * Truncate a file given a file descriptor.
 */
sys_ftruncate(struct ftruncate_args *uap)
	error = kern_ftruncate(uap->fd, uap->length);

/*
 * Sync an open file.
 */
sys_fsync(struct fsync_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((obj = vp->v_object) != NULL)
		vm_object_page_clean(obj, 0, 0, 0);
	if ((error = VOP_FSYNC(vp, MNT_WAIT)) == 0 && vp->v_mount)
		error = buf_fsync(vp);
2864 kern_rename(struct nlookupdata
*fromnd
, struct nlookupdata
*tond
)
2866 struct nchandle fnchd
;
2867 struct nchandle tnchd
;
2868 struct namecache
*ncp
;
2875 fromnd
->nl_flags
|= NLC_REFDVP
;
2876 if ((error
= nlookup(fromnd
)) != 0)
2878 if ((fnchd
.ncp
= fromnd
->nl_nch
.ncp
->nc_parent
) == NULL
)
2880 fnchd
.mount
= fromnd
->nl_nch
.mount
;
2884 * unlock the source nch so we can lookup the target nch without
2885 * deadlocking. The target may or may not exist so we do not check
2886 * for a target vp like kern_mkdir() and other creation functions do.
2888 * The source and target directories are ref'd and rechecked after
2889 * everything is relocked to determine if the source or target file
2892 KKASSERT(fromnd
->nl_flags
& NLC_NCPISLOCKED
);
2893 fromnd
->nl_flags
&= ~NLC_NCPISLOCKED
;
2894 cache_unlock(&fromnd
->nl_nch
);
2896 tond
->nl_flags
|= NLC_CREATE
| NLC_REFDVP
;
2897 if ((error
= nlookup(tond
)) != 0) {
2901 if ((tnchd
.ncp
= tond
->nl_nch
.ncp
->nc_parent
) == NULL
) {
2905 tnchd
.mount
= tond
->nl_nch
.mount
;
2909 * If the source and target are the same there is nothing to do
2911 if (fromnd
->nl_nch
.ncp
== tond
->nl_nch
.ncp
) {
2918 * Mount points cannot be renamed or overwritten
2920 if ((fromnd
->nl_nch
.ncp
->nc_flag
| tond
->nl_nch
.ncp
->nc_flag
) &
2929 * relock the source ncp. NOTE AFTER RELOCKING: the source ncp
2930 * may have become invalid while it was unlocked, nc_vp and nc_mount
2933 if (cache_lock_nonblock(&fromnd
->nl_nch
) == 0) {
2934 cache_resolve(&fromnd
->nl_nch
, fromnd
->nl_cred
);
2935 } else if (fromnd
->nl_nch
.ncp
> tond
->nl_nch
.ncp
) {
2936 cache_lock(&fromnd
->nl_nch
);
2937 cache_resolve(&fromnd
->nl_nch
, fromnd
->nl_cred
);
2939 cache_unlock(&tond
->nl_nch
);
2940 cache_lock(&fromnd
->nl_nch
);
2941 cache_resolve(&fromnd
->nl_nch
, fromnd
->nl_cred
);
2942 cache_lock(&tond
->nl_nch
);
2943 cache_resolve(&tond
->nl_nch
, tond
->nl_cred
);
2945 fromnd
->nl_flags
|= NLC_NCPISLOCKED
;
2948 * make sure the parent directories linkages are the same
2950 if (fnchd
.ncp
!= fromnd
->nl_nch
.ncp
->nc_parent
||
2951 tnchd
.ncp
!= tond
->nl_nch
.ncp
->nc_parent
) {
2958 * Both the source and target must be within the same filesystem and
2959 * in the same filesystem as their parent directories within the
2960 * namecache topology.
2962 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
2965 if (mp
!= tnchd
.mount
|| mp
!= fromnd
->nl_nch
.mount
||
2966 mp
!= tond
->nl_nch
.mount
) {
2973 * Make sure the mount point is writable
2975 if ((error
= ncp_writechk(&tond
->nl_nch
)) != 0) {
2982 * If the target exists and either the source or target is a directory,
2983 * then both must be directories.
2985 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might h
2988 if (tond
->nl_nch
.ncp
->nc_vp
) {
2989 if (fromnd
->nl_nch
.ncp
->nc_vp
== NULL
) {
2991 } else if (fromnd
->nl_nch
.ncp
->nc_vp
->v_type
== VDIR
) {
2992 if (tond
->nl_nch
.ncp
->nc_vp
->v_type
!= VDIR
)
2994 } else if (tond
->nl_nch
.ncp
->nc_vp
->v_type
== VDIR
) {
3000 * You cannot rename a source into itself or a subdirectory of itself.
3001 * We check this by travsersing the target directory upwards looking
3002 * for a match against the source.
3005 for (ncp
= tnchd
.ncp
; ncp
; ncp
= ncp
->nc_parent
) {
3006 if (fromnd
->nl_nch
.ncp
== ncp
) {
3017 * Even though the namespaces are different, they may still represent
3018 * hardlinks to the same file. The filesystem might have a hard time
3019 * with this so we issue a NREMOVE of the source instead of a NRENAME
3020 * when we detect the situation.
3023 fdvp
= fromnd
->nl_dvp
;
3024 tdvp
= tond
->nl_dvp
;
3025 if (fdvp
== NULL
|| tdvp
== NULL
) {
3027 } else if (fromnd
->nl_nch
.ncp
->nc_vp
== tond
->nl_nch
.ncp
->nc_vp
) {
3028 error
= VOP_NREMOVE(&fromnd
->nl_nch
, fdvp
,
3031 error
= VOP_NRENAME(&fromnd
->nl_nch
, &tond
->nl_nch
,
3032 fdvp
, tdvp
, tond
->nl_cred
);

/*
 * rename_args(char *from, char *to)
 *
 * Rename files.  Source and destination must either both be directories,
 * or both not be directories.  If target is a directory, it must be empty.
 */
int
sys_rename(struct rename_args *uap)
{
	struct nlookupdata fromnd, tond;
	int error;

	error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
	if (error == 0) {
		error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
		if (error == 0)
			error = kern_rename(&fromnd, &tond);
		nlookup_done(&tond);
	}
	nlookup_done(&fromnd);
	return (error);
}
int
kern_mkdir(struct nlookupdata *nd, int mode)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct vattr vattr;
	int error;

	nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
		return (error);

	if (nd->nl_nch.ncp->nc_vp)
		return (EEXIST);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);

	VATTR_NULL(&vattr);
	vattr.va_type = VDIR;
	vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;

	vp = NULL;
	error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, p->p_ucred, &vattr);
	if (error == 0)
		vput(vp);
	return (error);
}

/*
 * mkdir_args(char *path, int mode)
 *
 * Make a directory file.
 */
int
sys_mkdir(struct mkdir_args *uap)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0)
		error = kern_mkdir(&nd, uap->mode);
	nlookup_done(&nd);
	return (error);
}
int
kern_rmdir(struct nlookupdata *nd)
{
	int error;

	nd->nl_flags |= NLC_DELETE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
		return (error);

	/*
	 * Do not allow directories representing mount points to be
	 * deleted, even if empty.  Check write perms on mount point
	 * in case the vnode is aliased (aka nullfs).
	 */
	if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT))
		return (EINVAL);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);

	error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred);
	return (error);
}

/*
 * rmdir_args(char *path)
 *
 * Remove a directory file.
 */
int
sys_rmdir(struct rmdir_args *uap)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0)
		error = kern_rmdir(&nd);
	nlookup_done(&nd);
	return (error);
}
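
/*
 * Illustrative userland sketch (not part of this kernel source): rmdir(2) as
 * implemented by kern_rmdir() above removes only empty directories and, per
 * the NCF_ISMOUNTPT check, refuses to remove a directory that is an active
 * mount point.  The path is hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	if (rmdir("/tmp/example.dir") == -1) {
		/* ENOTEMPTY for a non-empty directory, an error for a mount point */
		fprintf(stderr, "rmdir: %s\n", strerror(errno));
		return (1);
	}
	return (0);
}
#endif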
int
kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
		   enum uio_seg direction)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov;
	long loff;
	int error, eofflag;

	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
unionread:
	if (vp->v_type != VDIR) {
		error = EINVAL;
		goto done;
	}
	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = direction;
	auio.uio_td = td;
	auio.uio_resid = count;
	loff = auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
	fp->f_offset = auio.uio_offset;
	if (error)
		goto done;

	if (count == auio.uio_resid) {
		if (union_dircheckp) {
			error = union_dircheckp(td, &vp, fp);
			if (error == -1)
				goto unionread;
			if (error)
				goto done;
		}
		if ((vp->v_flag & VROOT) &&
		    (vp->v_mount->mnt_flag & MNT_UNION)) {
			struct vnode *tvp = vp;

			vp = vp->v_mount->mnt_vnodecovered;
			vref(vp);
			fp->f_data = vp;
			fp->f_offset = 0;
			vrele(tvp);
			goto unionread;
		}
	}

	/*
	 * WARNING!  *basep may not be wide enough to accommodate the
	 * seek offset.   XXX should we hack this to return the upper 32 bits
	 * for offsets greater than 4G?
	 */
	if (basep)
		*basep = (long)loff;
	*res = count - auio.uio_resid;
done:
	fdrop(fp);
	return (error);
}

/*
 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
 *
 * Read a block of directory entries in a file system independent format.
 */
int
sys_getdirentries(struct getdirentries_args *uap)
{
	long base;
	int error;

	error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
				   &uap->sysmsg_result, UIO_USERSPACE);

	if (error == 0 && uap->basep)
		error = copyout(&base, uap->basep, sizeof(*uap->basep));
	return (error);
}

/*
 * getdents_args(int fd, char *buf, size_t count)
 */
int
sys_getdents(struct getdents_args *uap)
{
	int error;

	error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
				   &uap->sysmsg_result, UIO_USERSPACE);
	return (error);
}

/*
 * umask(int newmask)
 *
 * Set the mode mask for creation of filesystem nodes.
 */
int
sys_umask(struct umask_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	fdp = p->p_fd;
	uap->sysmsg_result = fdp->fd_cmask;
	fdp->fd_cmask = uap->newmask & ALLPERMS;
	return (0);
}
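
/*
 * Illustrative userland sketch (not part of this kernel source): as the code
 * above shows, umask(2) returns the previous creation mask while installing
 * the new one, so the usual idiom for reading the mask without changing it
 * is to set a dummy value and immediately restore the old one.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/stat.h>
#include <stdio.h>

int
main(void)
{
	mode_t old;

	old = umask(0);		/* returns previous mask, installs 0 */
	(void)umask(old);	/* put the original mask back */
	printf("umask is %04o\n", (unsigned)old);
	return (0);
}
#endif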

/*
 * revoke(char *path)
 *
 * Void all references to file by ripping underlying filesystem
 * away from vnode.
 */
int
sys_revoke(struct revoke_args *uap)
{
	struct nlookupdata nd;
	struct vattr vattr;
	struct vnode *vp;
	struct ucred *cred;
	int error;

	vp = NULL;
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	cred = crhold(nd.nl_cred);
	nlookup_done(&nd);
	if (error == 0) {
		if (vp->v_type != VCHR && vp->v_type != VBLK)
			error = EINVAL;
		if (error == 0)
			error = VOP_GETATTR(vp, &vattr);
		if (error == 0 && cred->cr_uid != vattr.va_uid)
			error = suser_cred(cred, PRISON_ROOT);
		if (error == 0 && count_udev(vp->v_umajor, vp->v_uminor) > 0) {
			VOP_REVOKE(vp, REVOKEALL);
		}
		vrele(vp);
	}
	if (cred)
		crfree(cred);
	return (error);
}
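
/*
 * Illustrative userland sketch (not part of this kernel source): revoke(2) is
 * used mostly by programs such as getty/init to detach any stale opens of a
 * terminal device before reusing it.  Only character or block devices are
 * accepted, matching the VCHR/VBLK check above; the device path here is
 * hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	if (revoke("/dev/ttyp0") == -1) {
		fprintf(stderr, "revoke: %s\n", strerror(errno));
		return (1);
	}
	return (0);
}
#endif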

/*
 * getfh_args(char *fname, fhandle_t *fhp)
 *
 * Get (NFS) file handle
 */
int
sys_getfh(struct getfh_args *uap)
{
	struct thread *td = curthread;
	struct nlookupdata nd;
	fhandle_t fh;
	struct vnode *vp;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = suser(td)) != 0)
		return (error);

	error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error == 0) {
		bzero(&fh, sizeof(fh));
		fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
		error = VFS_VPTOFH(vp, &fh.fh_fid);
		vput(vp);
		if (error == 0)
			error = copyout(&fh, uap->fhp, sizeof(fh));
	}
	return (error);
}
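
/*
 * Illustrative userland sketch (not part of this kernel source): getfh(2) is
 * restricted to the superuser and converts a pathname into an opaque
 * fhandle_t which NFS support code (e.g. mountd) later hands back to the
 * kernel.  The exported path is hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int
main(void)
{
	fhandle_t fh;

	if (getfh("/export/example", &fh) == -1) {
		fprintf(stderr, "getfh: %s\n", strerror(errno));
		return (1);
	}
	printf("got a %zu byte file handle\n", sizeof(fh));
	return (0);
}
#endif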

/*
 * fhopen_args(const struct fhandle *u_fhp, int flags)
 *
 * syscall for the rpc.lockd to use to translate a NFS file handle into
 * an open descriptor.
 *
 * warning: do not remove the suser() call or this becomes one giant
 * security hole.
 */
int
sys_fhopen(struct fhopen_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp;
	struct vnode *vp;
	struct fhandle fhp;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct flock lf;
	struct file *nfp;
	struct file *fp;
	int fmode, mode, error, type;
	int indx;

	/*
	 * Must be super user
	 */
	error = suser(td);
	if (error)
		return (error);

	fmode = FFLAGS(uap->flags);
	/* why not allow a non-read/write open for our lockd? */
	if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
		return (EINVAL);
	error = copyin(uap->u_fhp, &fhp, sizeof(fhp));
	if (error)
		return (error);

	/* find the mount point */
	mp = vfs_getvfs(&fhp.fh_fsid);
	if (mp == NULL)
		return (ESTALE);

	/* now give me my vnode, it gets returned to me locked */
	error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
	if (error)
		return (error);

	/*
	 * from now on we have to make sure not
	 * to forget about the vnode
	 * any error that causes an abort must vput(vp)
	 * just set error = err and 'goto bad;'.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp, NULL);
		if (error)
			goto bad;
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (mode) {
		error = VOP_ACCESS(vp, mode, p->p_ucred);
		if (error)
			goto bad;
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, p->p_ucred);
		if (error)
			goto bad;
	}

	/*
	 * VOP_OPEN needs the file pointer so it can potentially override
	 * it.
	 *
	 * WARNING! no f_nchandle will be associated when fhopen()ing a
	 * vnode obtained via a file handle.
	 */
	if ((error = falloc(p, &nfp, &indx)) != 0)
		goto bad;
	fp = nfp;

	error = VOP_OPEN(vp, fmode, p->p_ucred, fp);
	if (error) {
		/*
		 * setting f_ops this way prevents VOP_CLOSE from being
		 * called or fdrop() releasing the vp from v_data.   Since
		 * the VOP_OPEN failed we don't want to VOP_CLOSE.
		 */
		fp->f_ops = &badfileops;
		fp->f_data = NULL;
		goto bad_drop;
	}

	/*
	 * The fp is given its own reference, we still have our ref and lock.
	 *
	 * Assert that all regular files must be created with a VM object.
	 */
	if (vp->v_type == VREG && vp->v_object == NULL) {
		kprintf("fhopen: regular file did not have VM object: %p\n", vp);
	}

	/*
	 * The open was successful.  Handle any locking requirements.
	 */
	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (fmode & FNONBLOCK)
			type = 0;
		else
			type = F_WAIT;
		vn_unlock(vp);
		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * release our private reference.
			 */
			fsetfd(p, NULL, indx);
			fdrop(fp);
			vrele(vp);
			return (error);
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		fp->f_flag |= FHASLOCK;
	}

	/*
	 * Clean up.  Associate the file pointer with the previously
	 * reserved descriptor and return it.
	 */
	fsetfd(p, fp, indx);
	fdrop(fp);
	uap->sysmsg_result = indx;
	return (0);

bad_drop:
	fsetfd(p, NULL, indx);
	fdrop(fp);
bad:
	vput(vp);
	return (error);
}
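
/*
 * Illustrative userland sketch (not part of this kernel source): fhopen(2)
 * turns a file handle previously obtained via getfh(2)/NFS into an open
 * descriptor, which is why it is restricted to the superuser.  The exported
 * path used to obtain the handle is hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/param.h>
#include <sys/mount.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	fhandle_t fh;
	int fd;

	if (getfh("/export/example/file", &fh) == -1) {
		fprintf(stderr, "getfh: %s\n", strerror(errno));
		return (1);
	}
	fd = fhopen(&fh, O_RDONLY);
	if (fd == -1) {
		fprintf(stderr, "fhopen: %s\n", strerror(errno));
		return (1);
	}
	/* ... read from fd as with any other descriptor ... */
	close(fd);
	return (0);
}
#endif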

/*
 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
 */
int
sys_fhstat(struct fhstat_args *uap)
{
	struct thread *td = curthread;
	struct stat sb;
	fhandle_t fh;
	struct mount *mp;
	struct vnode *vp;
	int error;

	/*
	 * Must be super user
	 */
	error = suser(td);
	if (error)
		return (error);

	error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
	if (error)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);
	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
		return (error);
	error = vn_stat(vp, &sb, td->td_proc->p_ucred);
	vput(vp);
	if (error)
		return (error);
	error = copyout(&sb, uap->sb, sizeof(sb));
	return (error);
}

/*
 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
 */
int
sys_fhstatfs(struct fhstatfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct statfs *sp;
	struct mount *mp;
	struct vnode *vp;
	struct statfs sb;
	fhandle_t fh;
	char *fullpath, *freepath;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = suser(td)))
		return (error);

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);

	if (p != NULL && !chroot_visible_mnt(mp, p))
		return (ESTALE);

	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
		return (error);
	mp = vp->v_mount;
	sp = &mp->mnt_stat;
	vput(vp);
	if ((error = VFS_STATFS(mp, sp, p->p_ucred)) != 0)
		return (error);

	error = mount_path(p, mp, &fullpath, &freepath);
	if (error)
		return (error);
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, &sb, sizeof(sb));
	sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
	return (copyout(sp, uap->buf, sizeof(*sp)));
}

/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), and attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed to be copied into sysspace by the
 * syscall in the future, once issues with various consumers of the
 * attribute code have raised their hands.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;
	struct mount *mp;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0) {
		mp = nd.nl_nch.mount;
		error = VFS_EXTATTRCTL(mp, uap->cmd,
				       uap->attrname, uap->arg,
				       nd.nl_cred);
	}
	nlookup_done(&nd);
	return (error);
}

/*
 * Syscall to set a named extended attribute on a file or directory.
 * Accepts attribute name, and a uio structure pointing to the data to set.
 * The uio is consumed in the style of writev().  The real work happens
 * in VOP_SETEXTATTR().
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct iovec aiov[UIO_SMALLIOV];
	struct iovec *needfree;
	struct nlookupdata nd;
	struct iovec *iov;
	struct vnode *vp;
	struct uio auio;
	u_int iovlen, cnt;
	int error, i;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	needfree = NULL;
	iovlen = uap->iovcnt * sizeof(struct iovec);
	if (uap->iovcnt > UIO_SMALLIOV) {
		if (uap->iovcnt > UIO_MAXIOV) {
			error = EINVAL;
			goto done;
		}
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else {
		iov = aiov;
	}
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = nd.nl_td;
	auio.uio_offset = 0;
	if ((error = copyin(uap->iovp, iov, iovlen)))
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < uap->iovcnt; i++) {
		if (iov->iov_len > INT_MAX - auio.uio_resid) {
			error = EINVAL;
			goto done;
		}
		auio.uio_resid += iov->iov_len;
		iov++;
	}
	cnt = auio.uio_resid;
	error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred);
	cnt -= auio.uio_resid;
	uap->sysmsg_result = cnt;
done:
	vput(vp);
	nlookup_done(&nd);
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

/*
 * Syscall to get a named extended attribute on a file or directory.
 * Accepts attribute name, and a uio structure pointing to a buffer for the
 * data.  The uio is consumed in the style of readv().  The real work
 * happens in VOP_GETEXTATTR().
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct iovec aiov[UIO_SMALLIOV];
	struct iovec *needfree;
	struct nlookupdata nd;
	struct iovec *iov;
	struct vnode *vp;
	struct uio auio;
	u_int iovlen, cnt;
	int error, i;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	needfree = NULL;
	iovlen = uap->iovcnt * sizeof (struct iovec);
	if (uap->iovcnt > UIO_SMALLIOV) {
		if (uap->iovcnt > UIO_MAXIOV) {
			error = EINVAL;
			goto done;
		}
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else {
		iov = aiov;
	}
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = nd.nl_td;
	auio.uio_offset = 0;
	if ((error = copyin(uap->iovp, iov, iovlen)))
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < uap->iovcnt; i++) {
		if (iov->iov_len > INT_MAX - auio.uio_resid) {
			error = EINVAL;
			goto done;
		}
		auio.uio_resid += iov->iov_len;
		iov++;
	}
	cnt = auio.uio_resid;
	error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred);
	cnt -= auio.uio_resid;
	uap->sysmsg_result = cnt;
done:
	vput(vp);
	nlookup_done(&nd);
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred);
	vput(vp);
	nlookup_done(&nd);
	return (error);
}
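
/*
 * Illustrative userland sketch (not part of this kernel source): exercising
 * the three extended attribute syscalls above.  The userland prototypes are
 * assumed here to mirror the *_args structures used by these handlers (path,
 * attribute name, and a writev()/readv() style iovec array); the attribute
 * name and path are hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/extattr.h>
#include <string.h>

int
main(void)
{
	/* prototypes assumed from the *_args structures above */
	char value[] = "example-value";
	char readback[64];
	struct iovec iov;

	/* set: one iovec pointing at the value, consumed like writev() */
	iov.iov_base = value;
	iov.iov_len = strlen(value);
	if (extattr_set_file("/tmp/example.file", "example.attr", &iov, 1) < 0)
		return (1);

	/* get: one iovec describing the receive buffer, like readv() */
	iov.iov_base = readback;
	iov.iov_len = sizeof(readback);
	if (extattr_get_file("/tmp/example.file", "example.attr", &iov, 1) < 0)
		return (1);

	/* delete the attribute again */
	if (extattr_delete_file("/tmp/example.file", "example.attr") < 0)
		return (1);
	return (0);
}
#endif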

/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;
	while (nch.ncp) {
		if (nch.mount == p->p_fd->fd_nrdir.mount &&
		    nch.ncp == p->p_fd->fd_nrdir.ncp) {
			return (1);
		}
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		} else {
			nch.ncp = nch.ncp->nc_parent;
		}
	}

	/*
	 * If the mount point is not visible to the process, but the
	 * process root is in a subdirectory of the mount, return
	 * visibility anyway.
	 */
	if (p->p_fd->fd_nrdir.mount == mp)
		return (1);
	return (0);
}