/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/dirent.h>
#include <sys/extattr.h>
#include <sys/spinlock.h>
#include <sys/kern_syscall.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <machine/limits.h>
#include <machine/stdarg.h>
static void mount_warning(struct mount *mp, const char *ctl, ...);
static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb);
static int checkvp_chdir (struct vnode *vn, struct thread *td);
static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch);
static int chroot_refuse_vdir_fds (thread_t td, struct filedesc *fdp);
static int chroot_visible_mnt(struct mount *mp, struct proc *p);
static int getutimes (struct timeval *, struct timespec *);
static int getutimens (const struct timespec *, struct timespec *, int *);
static int setfown (struct mount *, struct vnode *, uid_t, gid_t);
static int setfmode (struct vnode *, int);
static int setfflags (struct vnode *, int);
static int setutimes (struct vnode *, struct vattr *,
			const struct timespec *, int);

static int usermount = 0;	/* if 1, non-root can mount fs. */

SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
    "Allow non-root users to mount filesystems");
/*
 * Virtual File System System Calls
 */

/*
 * Mount a file system.
 *
 * mount_args(char *type, char *path, int flags, caddr_t data)
 */
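/*
 * The caddr_t data argument is passed through uninterpreted to the target
 * filesystem's VFS_MOUNT() below; each VFS typically copies in its own
 * *_args structure from it (the exact layout is VFS-specific and is not
 * defined in this file).
 */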
sys_mount(struct mount_args *uap)
	struct thread *td = curthread;
	struct mount *mp, *nullmp;
	struct vfsconf *vfsp;
	int error, flag = 0, flag2 = 0;
	struct nlookupdata nd;
	char fstypename[MFSNAMELEN];

	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))

	/*
	 * Do not allow NFS export by non-root users.
	 */
	if (uap->flags & MNT_EXPORTED) {
		error = priv_check(td, PRIV_ROOT);

	/*
	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
	 */
	if (priv_check(td, PRIV_ROOT))
		uap->flags |= MNT_NOSUID | MNT_NODEV;

	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if ((error = nlookup(&nd)) == 0) {
		if (nd.nl_nch.ncp->nc_vp == NULL)

		/*
		 * If the target filesystem is resolved via a nullfs mount,
		 * then nd.nl_nch.mount will be pointing to the nullfs mount
		 * structure instead of the target file system.  We need it
		 * in case we are
		 */
		nullmp = nd.nl_nch.mount;

		/*
		 * Extract the locked+refd ncp and cleanup the nd structure
		 */
		cache_zero(&nd.nl_nch);

	if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) &&
	    (mp = cache_findmount(&nch)) != NULL) {

	/*
	 * now we have the locked ref'd nch and unreferenced vnode.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {

	/*
	 * Extract the file system type.  We need to know this early, to take
	 * appropriate actions if we are dealing with a nullfs.
	 */
	if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {

	/*
	 * Now we have an unlocked ref'd nch and a locked ref'd vp
	 */
	if (uap->flags & MNT_UPDATE) {
		if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) {
		if (strncmp(fstypename, "null", 5) == 0) {
		flag2 = mp->mnt_kern_flag;

		/*
		 * We only allow the filesystem to be reloaded if it
		 * is currently mounted read-only.
		 */
		if ((uap->flags & MNT_RELOAD) &&
		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
			error = EOPNOTSUPP;	/* Needs translation */

		/*
		 * Only root, or the user that did the original mount is
		 * permitted to update it.
		 */
		if (mp->mnt_stat.f_owner != cred->cr_uid &&
		    (error = priv_check(td, PRIV_ROOT))) {
		if (vfs_busy(mp, LK_NOWAIT)) {
			uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
		lwkt_gettoken(&mp->mnt_token);

	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	if ((error = VOP_GETATTR(vp, &va)) ||
	    (va.va_uid != cred->cr_uid &&
	     (error = priv_check(td, PRIV_ROOT)))) {
	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
	if (vp->v_type != VDIR) {
	if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) {
	vfsp = vfsconf_find_by_name(fstypename);
		/* Only load modules for root (very important!) */
		if ((error = priv_check(td, PRIV_ROOT)) != 0) {
		error = linker_load_file(fstypename, &lf);
		if (error || lf == NULL) {
		/* lookup again, see if the VFS was loaded */
		vfsp = vfsconf_find_by_name(fstypename);
			linker_file_unload(lf);

	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	lwkt_gettoken(&mp->mnt_token);

	/*
	 * (per-mount token acquired at this point)
	 *
	 * Set the mount level flags.
	 */
	if (uap->flags & MNT_RDONLY)
		mp->mnt_flag |= MNT_RDONLY;
	else if (mp->mnt_flag & MNT_RDONLY)
		mp->mnt_kern_flag |= MNTK_WANTRDWR;
	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
			  MNT_SYNCHRONOUS | MNT_ASYNC | MNT_NOATIME |
			  MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM |
			  MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR |
	mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
			  MNT_NODEV | MNT_SYNCHRONOUS | MNT_ASYNC | MNT_FORCE |
			  MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM |
			  MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR |

	/*
	 * Pre-set the mount's ALL_MPSAFE flags if specified in the vfsconf.
	 * This way the initial VFS_MOUNT() call will also be MPSAFE.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 */
	error = VFS_MOUNT(mp, uap->path, uap->data, cred);
	if (mp->mnt_flag & MNT_UPDATE) {
		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
			mp->mnt_flag &= ~MNT_RDONLY;
		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
			mp->mnt_kern_flag = flag2;
		lwkt_reltoken(&mp->mnt_token);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 */
	if (mp->mnt_ncmountpt.ncp == NULL) {
		/*
		 * Allocate, then unlock, but leave the ref intact.
		 * This is the mnt_refs (1) that we will retain
		 * through to the unmount.
		 */
		cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
		cache_unlock(&mp->mnt_ncmountpt);
	mp->mnt_ncmounton = nch;		/* inherits ref */
	nch.ncp->nc_flag |= NCF_ISMOUNTPT;
	cache_ismounting(mp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mountlist_insert(mp, MNTINS_LAST);
	checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt);
	error = vfs_allocate_syncvnode(mp);
	lwkt_reltoken(&mp->mnt_token);
	error = VFS_START(mp, 0);
	KNOTE(&fs_klist, VQ_MOUNT);

	vn_syncer_thr_stop(mp);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
	mp->mnt_vfc->vfc_refcount--;
	lwkt_reltoken(&mp->mnt_token);
/*
 * Scan all active processes to see if any of them have a current
 * or root directory onto which the new filesystem has just been
 * mounted.  If so, replace them with the new mount point.
 *
 * Both old_nch and new_nch are ref'd on call but not locked.
 * new_nch must be temporarily locked so it can be associated with the
 * vnode representing the root of the mount point.
 */
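/*
 * Concretely, the callback below walks every process's filedesc and, where
 * the cached current directory (fd_ncdir/fd_cdir) or root directory
 * (fd_nrdir/fd_rdir) still points at the now-covered directory, swaps in
 * the root of the new mount.
 */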
struct checkdirs_info {
	struct nchandle old_nch;
	struct nchandle new_nch;
	struct vnode *old_vp;
	struct vnode *new_vp;
};

static int checkdirs_callback(struct proc *p, void *data);

checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
	struct checkdirs_info info;

	/*
	 * If the old mount point's vnode has a usecount of 1, it is not
	 * being held as a descriptor anywhere.
	 */
	olddp = old_nch->ncp->nc_vp;
	if (olddp == NULL || VREFCNT(olddp) == 1)

	/*
	 * Force the root vnode of the new mount point to be resolved
	 * so we can update any matching processes.
	 */
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	vn_lock(newdp, LK_EXCLUSIVE | LK_RETRY);
	cache_setunresolved(new_nch);
	cache_setvp(new_nch, newdp);
	cache_unlock(new_nch);

	/*
	 * Special handling of the root node
	 */
	if (rootvnode == olddp) {
		vfs_cache_setroot(newdp, cache_hold(new_nch));

	/*
	 * Pass newdp separately so the callback does not have to access
	 * it via new_nch->ncp->nc_vp.
	 */
	info.old_nch = *old_nch;
	info.new_nch = *new_nch;
	allproc_scan(checkdirs_callback, &info, 0);
/*
 * NOTE: callback is not MP safe because the scanned process's filedesc
 *	 structure can be ripped out from under us, among other things.
 */
checkdirs_callback(struct proc *p, void *data)
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock(&fdp->fd_spin);
		if (fdp->fd_ncdir.mount == info->old_nch.mount &&
		    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
			vprele1 = fdp->fd_cdir;
			fdp->fd_cdir = info->new_vp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->new_nch, &fdp->fd_ncdir);
		if (fdp->fd_nrdir.mount == info->old_nch.mount &&
		    fdp->fd_nrdir.ncp == info->old_nch.ncp) {
			vprele2 = fdp->fd_rdir;
			fdp->fd_rdir = info->new_vp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->new_nch, &fdp->fd_nrdir);
		spin_unlock(&fdp->fd_spin);
		cache_drop(&ncdrop1);
		cache_drop(&ncdrop2);
/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not special file (as before).
 *
 * umount_args(char *path, int flags)
 */
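/*
 * Illustrative usage (sketch): umount(8) ultimately issues something like
 *
 *	unmount("/mnt", MNT_FORCE);
 *
 * The path must resolve to the root of the mounted filesystem; a path
 * inside the mount is rejected below.
 */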
sys_unmount(struct unmount_args *uap)
	struct thread *td = curthread;
	struct proc *p __debugvar = td->td_proc;
	struct mount *mp = NULL;
	struct nlookupdata nd;

	if (td->td_ucred->cr_prison != NULL) {
	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != td->td_ucred->cr_uid) &&
	    (error = priv_check(td, PRIV_ROOT)))

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {

	/*
	 * If no error try to issue the unmount.  We lose our cache
	 * ref when we call nlookup_done so we must hold the mount point
	 * to prevent use-after-free races.
	 */
	error = dounmount(mp, uap->flags, 0);
/*
 * Do the actual file system unmount (interlocked against the mountlist
 * token and mp->mnt_token).
 */
dounmount_interlock(struct mount *mp)
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
	mp->mnt_kern_flag |= MNTK_UNMOUNT;

unmount_allproc_cb(struct proc *p, void *arg)
	if (p->p_textnch.ncp == NULL)

	mp = (struct mount *)arg;
	if (p->p_textnch.mount == mp)
		cache_drop(&p->p_textnch);
/*
 * The guts of the unmount code.  The mount owns one ref and one hold
 * count.  If we successfully interlock the unmount, those refs are ours.
 * (The ref is from mnt_ncmountpt).
 *
 * When halting we shortcut certain mount types such as devfs by not actually
 * issuing the VFS_SYNC() or VFS_UNMOUNT().  They are still disconnected
 * from the mountlist so higher-level filesystems can unmount cleanly.
 *
 * The mount types that allow QUICKHALT are: devfs, tmpfs, procfs.
 */
dounmount(struct mount *mp, int flags, int halting)
	struct namecache *ncp;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * When halting, certain mount points can essentially just
	 * be unhooked and otherwise ignored.
	 */
	if (halting && (mp->mnt_kern_flag & MNTK_QUICKHALT)) {

	/*
	 * Exclusive access for unmounting purposes.
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)

	/*
	 * We now 'own' the last mp->mnt_refs
	 *
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_TIMELOCK);
	error = lockmgr(&mp->mnt_lock, lflags);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;
	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwards.
	 *
	 * We own the last mnt_refs by owning mnt_ncmountpt.
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);
		cache_clearmntcache();
		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			allproc_scan(&unmount_allproc_cb, mp, 0);
		cache_clearmntcache();
		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			if ((flags & MNT_FORCE) == 0) {
				mount_warning(mp, "Cannot unmount: "
				mount_warning(mp, "Forced unmount: "
	/*
	 * Decommission our special mnt_syncer vnode.  This also stops
	 * the vnlru code.  If we are unable to unmount we recommission
	 * it.
	 *
	 * Then sync the filesystem.
	 */
	if ((vp = mp->mnt_syncer) != NULL) {
		mp->mnt_syncer = NULL;
		atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	if (quickhalt == 0) {
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			VFS_SYNC(mp, MNT_WAIT);
	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 *
	 * Scans can get temporary refs on a mountpoint (though really
	 * heavy duty stuff like cache_findmount() do not).
	 */
	if (mp->mnt_refs != 1)
		cache_clearmntcache();
	for (retry = 0; retry < 10 && mp->mnt_refs != 1; ++retry) {
		cache_unmounting(mp);
		tsleep(&mp->mnt_refs, 0, "mntbsy", hz / 10 + 1);
		cache_clearmntcache();
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
				      "%d mount refs still present",
			mount_warning(mp, "Forced unmount: "
				      "%d mount refs still present",

	/*
	 * So far so good, sync the filesystem once more and
	 * call the VFS unmount code if the sync succeeds.
	 */
	if (error == 0 && quickhalt == 0) {
		if (mp->mnt_flag & MNT_RDONLY) {
			error = VFS_UNMOUNT(mp, flags);
			error = VFS_SYNC(mp, MNT_WAIT);
			    (error == EOPNOTSUPP) || /* No sync */
			    (flags & MNT_FORCE)) {
				error = VFS_UNMOUNT(mp, flags);

	/*
	 * If an error occurred we can still recover, restoring the
	 * syncer vnode and misc flags.
	 */
		if (mp->mnt_syncer == NULL)
			vfs_allocate_syncvnode(mp);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		lockmgr(&mp->mnt_lock, LK_RELEASE);
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;

	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 *
	 * mnt_refs should go to zero when we scrap mnt_ncmountpt.
	 *
	 * When quickhalting we have to keep these intact because the
	 * underlying vnodes have not been destroyed, and some might be
	 */
	if (quickhalt == 0) {
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);

	if (mp->mnt_ncmountpt.ncp != NULL) {
		nch = mp->mnt_ncmountpt;
		cache_zero(&mp->mnt_ncmountpt);
		cache_clrmountpt(&nch);
	if (mp->mnt_ncmounton.ncp != NULL) {
		cache_unmounting(mp);
		nch = mp->mnt_ncmounton;
		cache_zero(&mp->mnt_ncmounton);
		cache_clrmountpt(&nch);

	mp->mnt_vfc->vfc_refcount--;

	/*
	 * If not quickhalting the mount, we expect there to be no
	 */
	if (quickhalt == 0 && !TAILQ_EMPTY(&mp->mnt_nvnodelist))
		panic("unmount: dangling vnode");

	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT) {
		mp->mnt_kern_flag &= ~MNTK_MWAIT;

	/*
	 * If we reach here and freeok != 0 we must free the mount.
	 * mnt_refs should already have dropped to 0, so if it is not
	 * zero we must cycle the caches and wait.
	 *
	 * When we are satisfied that the mount has disconnected we can
	 * drop the hold on the mp that represented the mount (though the
	 * caller might actually have another, so the caller's drop may
	 * do the actual free).
	 */
	if (mp->mnt_refs > 0)
		cache_clearmntcache();
	while (mp->mnt_refs > 0) {
		cache_unmounting(mp);
		tsleep(&mp->mnt_refs, 0, "umntrwait", hz / 10 + 1);
		cache_clearmntcache();
	lwkt_reltoken(&mp->mnt_token);

	cache_clearmntcache();
	KNOTE(&fs_klist, VQ_UNMOUNT);

	lwkt_reltoken(&mp->mnt_token);
mount_warning(struct mount *mp, const char *ctl, ...)
	if (cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			   &ptr, &buf, 0) == 0) {
		kprintf("unmount(%s): ", ptr);
		kprintf("unmount(%p", mp);
		if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name)
			kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name);
/*
 * Shim cache_fullpath() to handle the case where a process is chrooted into
 * a subdirectory of a mount.  In this case if the root mount matches the
 * process root directory's mount we have to specify the process's root
 * directory instead of the mount point, because the mount point might
 * be above the root directory.
 */
mount_path(struct proc *p, struct mount *mp, char **rb, char **fb)
	struct nchandle *nch;

	if (p && p->p_fd->fd_nrdir.mount == mp)
		nch = &p->p_fd->fd_nrdir;
	else
		nch = &mp->mnt_ncmountpt;
	return(cache_fullpath(p, nch, NULL, rb, fb, 0));
/*
 * Sync each mounted filesystem.
 */
static int syncprt = 0;
SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");

static int sync_callback(struct mount *mp, void *data);

sys_sync(struct sync_args *uap)
	mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);

sync_callback(struct mount *mp, void *data __unused)
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		lwkt_gettoken(&mp->mnt_token);
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		lwkt_reltoken(&mp->mnt_token);
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT);
		lwkt_gettoken(&mp->mnt_token);
		mp->mnt_flag |= asyncflag;
		lwkt_reltoken(&mp->mnt_token);
/* XXX PRISON: could be per prison flag */
static int prison_quotas;
SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");

/*
 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
 *
 * Change filesystem quotas.
 */
sys_quotactl(struct quotactl_args *uap)
	struct nlookupdata nd;

	if (td->td_ucred->cr_prison && !prison_quotas) {
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
		mp = nd.nl_nch.mount;
		error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
				     uap->arg, nd.nl_cred);
/*
 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
 *	    void *buf, int buflen)
 *
 * This function operates on a mount point and executes the specified
 * operation using the specified control data, and possibly returns data.
 *
 * The actual number of bytes stored in the result buffer is returned, 0
 * if none, otherwise an error is returned.
 */
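/*
 * Illustrative usage (sketch; the result format is op-specific):
 *
 *	char buf[256];
 *	int n = mountctl("/mnt", MOUNTCTL_MOUNTFLAGS, -1, NULL, 0,
 *			 buf, sizeof(buf));
 *
 * MOUNTCTL_MOUNTFLAGS is the one op that is allowed without PRIV_ROOT in
 * the permission checks below.
 */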
sys_mountctl(struct mountctl_args *uap)
	struct thread *td = curthread;

	/*
	 * Sanity and permissions checks.  We must be root.
	 */
	if (td->td_ucred->cr_prison != NULL)
	if ((uap->op != MOUNTCTL_MOUNTFLAGS) &&
	    (error = priv_check(td, PRIV_ROOT)) != 0)

	/*
	 * Argument length checks
	 */
	if (uap->ctllen < 0 || uap->ctllen > 1024)
	if (uap->buflen < 0 || uap->buflen > 16 * 1024)
	if (uap->path == NULL)

	/*
	 * Allocate the necessary buffers and copyin data
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
		ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
		error = copyin(uap->ctl, ctl, uap->ctllen);
		buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);

	/*
	 * Validate the descriptor
	 */
		fp = holdfp(td, uap->fd, -1);

	/*
	 * Execute the internal kernel function and clean up.
	 */
	error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen,
			      buf, uap->buflen, &uap->sysmsg_result);
		dropfp(td, uap->fd, fp);
	if (error == 0 && uap->sysmsg_result > 0)
		error = copyout(buf, uap->buf, uap->sysmsg_result);
	objcache_put(namei_oc, path);
/*
 * Execute a mount control operation by resolving the path to a mount point
 * and calling vop_mountctl().
 *
 * Use the mount point from the nch instead of the vnode so nullfs mounts
 * can properly spike the VOP.
 */
kern_mountctl(const char *path, int op, struct file *fp,
	      const void *ctl, int ctllen,
	      void *buf, int buflen, int *res)
	struct nlookupdata nd;
	struct nchandle nch;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);

	/*
	 * Yes, all this is needed to use the nch.mount below, because
	 * we must maintain a ref on the mount to avoid ripouts (e.g.
	 * due to heavy mount/unmount use by synth or poudriere).
	 */
	cache_zero(&nd.nl_nch);

	/*
	 * Must be the root of the filesystem
	 */
	if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) {
	if (mp == NULL || mp->mnt_kern_flag & MNTK_UNMOUNT) {
		kprintf("kern_mountctl: Warning, \"%s\" racing unmount\n",
	error = vop_mountctl(mp->mnt_vn_use_ops, vp, op, fp, ctl, ctllen,
kern_statfs(struct nlookupdata *nd, struct statfs *buf)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	char *fullpath, *freepath;

	if ((error = nlookup(nd)) != 0)
	mp = nd->nl_nch.mount;
	if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0)
	error = mount_path(p, mp, &fullpath, &freepath);
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));
	/* Only root should have access to the fsid's. */
	if (priv_check(td, PRIV_ROOT))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
/*
 * statfs_args(char *path, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
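/*
 * Illustrative usage (sketch, from userland):
 *
 *	struct statfs sb;
 *	if (statfs("/usr", &sb) == 0)
 *		printf("%s on %s (%s)\n", sb.f_mntfromname,
 *		       sb.f_mntonname, sb.f_fstypename);
 */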
sys_statfs(struct statfs_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_statfs(&nd, &buf);
	error = copyout(&buf, uap->buf, sizeof(*uap->buf));

kern_fstatfs(int fd, struct statfs *buf)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	char *fullpath, *freepath;

	if ((error = holdvnode(td, fd, &fp)) != 0)

	/*
	 * Try to use mount info from any overlays rather than the
	 * mount info for the underlying vnode, otherwise we will
	 * fail when operating on null-mounted paths inside a chroot.
	 */
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (fp->f_cred == NULL) {
	if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
	if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0)
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));

	/* Only root should have access to the fsid's. */
	if (priv_check(td, PRIV_ROOT))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
/*
 * fstatfs_args(int fd, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
sys_fstatfs(struct fstatfs_args *uap)
	error = kern_fstatfs(uap->fd, &buf);
	error = copyout(&buf, uap->buf, sizeof(*uap->buf));

kern_statvfs(struct nlookupdata *nd, struct statvfs *buf)
	if ((error = nlookup(nd)) != 0)
	mp = nd->nl_nch.mount;
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, nd->nl_cred)) != 0)

	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;
	bcopy(sp, buf, sizeof(*buf));
/*
 * statvfs_args(char *path, struct statvfs *buf)
 *
 * Get filesystem statistics.
 */
sys_statvfs(struct statvfs_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_statvfs(&nd, &buf);
	error = copyout(&buf, uap->buf, sizeof(*uap->buf));

kern_fstatvfs(int fd, struct statvfs *buf)
	struct thread *td = curthread;

	if ((error = holdvnode(td, fd, &fp)) != 0)
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (fp->f_cred == NULL) {
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, fp->f_cred)) != 0)

	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;

	bcopy(sp, buf, sizeof(*buf));
/*
 * fstatvfs_args(int fd, struct statvfs *buf)
 *
 * Get filesystem statistics.
 */
sys_fstatvfs(struct fstatvfs_args *uap)
	error = kern_fstatvfs(uap->fd, &buf);
	error = copyout(&buf, uap->buf, sizeof(*uap->buf));

/*
 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
 *
 * Get statistics on all filesystems.
 */
struct getfsstat_info {
	struct statfs *sfsp;

static int getfsstat_callback(struct mount *, void *);

sys_getfsstat(struct getfsstat_args *uap)
	struct thread *td = curthread;
	struct getfsstat_info info;

	bzero(&info, sizeof(info));
	info.maxcount = uap->bufsize / sizeof(struct statfs);
	info.sfsp = uap->buf;
	info.flags = uap->flags;

	mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
	if (info.sfsp && info.count > info.maxcount)
		uap->sysmsg_result = info.maxcount;
	else
		uap->sysmsg_result = info.count;
	return (info.error);

getfsstat_callback(struct mount *mp, void *data)
	struct getfsstat_info *info = data;

	if (info->sfsp && info->count < info->maxcount) {
		if (info->td->td_proc &&
		    !chroot_visible_mnt(mp, info->td->td_proc)) {
		/*
		 * If MNT_NOWAIT or MNT_LAZY is specified, do not
		 * refresh the fsstat cache.  MNT_WAIT overrides
		 * MNT_NOWAIT and MNT_LAZY.
		 */
		if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
		     (info->flags & MNT_WAIT)) &&
		    (error = VFS_STATFS(mp, sp, info->td->td_ucred))) {
		sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
		error = mount_path(info->td->td_proc, mp, &fullpath, &freepath);
			info->error = error;
		bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
		strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
		kfree(freepath, M_TEMP);
		error = copyout(sp, info->sfsp, sizeof(*sp));
			info->error = error;

/*
 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf,
 *		   long bufsize, int flags)
 *
 * Get statistics on all filesystems.
 */
struct getvfsstat_info {
	struct statfs *sfsp;
	struct statvfs *vsfsp;

static int getvfsstat_callback(struct mount *, void *);

sys_getvfsstat(struct getvfsstat_args *uap)
	struct thread *td = curthread;
	struct getvfsstat_info info;

	bzero(&info, sizeof(info));
	info.maxcount = uap->vbufsize / sizeof(struct statvfs);
	info.sfsp = uap->buf;
	info.vsfsp = uap->vbuf;
	info.flags = uap->flags;

	mountlist_scan(getvfsstat_callback, &info, MNTSCAN_FORWARD);
	if (info.vsfsp && info.count > info.maxcount)
		uap->sysmsg_result = info.maxcount;
	else
		uap->sysmsg_result = info.count;
	return (info.error);

getvfsstat_callback(struct mount *mp, void *data)
	struct getvfsstat_info *info = data;
	struct statvfs *vsp;

	if (info->vsfsp && info->count < info->maxcount) {
		if (info->td->td_proc &&
		    !chroot_visible_mnt(mp, info->td->td_proc)) {
		vsp = &mp->mnt_vstat;
		/*
		 * If MNT_NOWAIT or MNT_LAZY is specified, do not
		 * refresh the fsstat cache.  MNT_WAIT overrides
		 * MNT_NOWAIT and MNT_LAZY.
		 */
		if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
		     (info->flags & MNT_WAIT)) &&
		    (error = VFS_STATFS(mp, sp, info->td->td_ucred))) {
		sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
		if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
		     (info->flags & MNT_WAIT)) &&
		    (error = VFS_STATVFS(mp, vsp, info->td->td_ucred))) {
		if (mp->mnt_flag & MNT_RDONLY)
			vsp->f_flag |= ST_RDONLY;
		if (mp->mnt_flag & MNT_NOSUID)
			vsp->f_flag |= ST_NOSUID;
		error = mount_path(info->td->td_proc, mp, &fullpath, &freepath);
			info->error = error;
		bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
		strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
		kfree(freepath, M_TEMP);
		error = copyout(sp, info->sfsp, sizeof(*sp));
		error = copyout(vsp, info->vsfsp, sizeof(*vsp));
			info->error = error;
/*
 * fchdir_args(int fd)
 *
 * Change current working directory to a given file descriptor.
 */
sys_fchdir(struct fchdir_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct nchandle nch, onch, tnch;

	if ((error = holdvnode(td, uap->fd, &fp)) != 0)
	lwkt_gettoken(&p->p_token);
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if (fp->f_nchandle.ncp == NULL)
		error = checkvp_chdir(vp, td);
	cache_copy(&fp->f_nchandle, &nch);

	/*
	 * If the ncp has become a mount point, traverse through
	 */
	while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) &&
	       (mp = cache_findmount(&nch)) != NULL
		error = nlookup_mp(mp, &tnch);
			cache_unlock(&tnch);	/* leave ref intact */
			vp = tnch.ncp->nc_vp;
			error = vget(vp, LK_SHARED);
			KKASSERT(error == 0);
		cache_dropmount(mp);
	spin_lock(&fdp->fd_spin);
	onch = fdp->fd_ncdir;
	fdp->fd_ncdir = nch;
	spin_unlock(&fdp->fd_spin);
	vn_unlock(vp);		/* leave ref intact */
	lwkt_reltoken(&p->p_token);
kern_chdir(struct nlookupdata *nd)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct nchandle onch;

	nd->nl_flags |= NLC_SHAREDLOCK;
	if ((error = nlookup(nd)) != 0)
	if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
	if ((error = vget(vp, LK_SHARED)) != 0)

	lwkt_gettoken(&p->p_token);
	error = checkvp_chdir(vp, td);
		spin_lock(&fdp->fd_spin);
		onch = fdp->fd_ncdir;
		fdp->fd_ncdir = nd->nl_nch;
		spin_unlock(&fdp->fd_spin);
		cache_unlock(&nd->nl_nch);	/* leave reference intact */
		cache_zero(&nd->nl_nch);
	lwkt_reltoken(&p->p_token);

/*
 * chdir_args(char *path)
 *
 * Change current working directory (``.'').
 */
sys_chdir(struct chdir_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chdir(&nd);
/*
 * Helper function for raised chroot(2) security function:  Refuse if
 * any filedescriptors are open directories.
 */
chroot_refuse_vdir_fds(thread_t td, struct filedesc *fdp)
	for (fd = 0; fd < fdp->fd_nfiles; fd++) {
		if ((error = holdvnode(td, fd, &fp)) != 0)
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VDIR) {

/*
 * This sysctl determines if we will allow a process to chroot(2) if it
 * has a directory open:
 *	0: disallowed for all processes.
 *	1: allowed for processes that were not already chroot(2)'ed.
 *	2: allowed for all processes.
 */
static int chroot_allow_open_directories = 1;

SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
    &chroot_allow_open_directories, 0, "");
/*
 * chroot to the specified namecache entry.  We obtain the vp from the
 * namecache data.  The passed ncp must be locked and referenced and will
 * remain locked and referenced on return.
 */
kern_chroot(struct nchandle *nch)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;

	/*
	 * Only privileged user can chroot
	 */
	error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0);

	/*
	 * Disallow open directory descriptors (fchdir() breakouts).
	 */
	if (chroot_allow_open_directories == 0 ||
	    (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
		if ((error = chroot_refuse_vdir_fds(td, fdp)) != 0)
	if ((vp = nch->ncp->nc_vp) == NULL)
	if ((error = vget(vp, LK_SHARED)) != 0)

	/*
	 * Check the validity of vp as a directory to change to and
	 * associate it with rdir/jdir.
	 */
	error = checkvp_chdir(vp, td);
	vn_unlock(vp);			/* leave reference intact */
		lwkt_gettoken(&p->p_token);
		vrele(fdp->fd_rdir);
		fdp->fd_rdir = vp;	/* reference inherited by fd_rdir */
		cache_drop(&fdp->fd_nrdir);
		cache_copy(nch, &fdp->fd_nrdir);
		if (fdp->fd_jdir == NULL) {
			cache_copy(nch, &fdp->fd_njdir);
		if ((p->p_flags & P_DIDCHROOT) == 0) {
			p->p_flags |= P_DIDCHROOT;
			if (p->p_depth <= 65535 - 32)
		lwkt_reltoken(&p->p_token);
/*
 * chroot_args(char *path)
 *
 * Change notion of root (``/'') directory.
 */
sys_chroot(struct chroot_args *uap)
	struct thread *td __debugvar = curthread;
	struct nlookupdata nd;

	KKASSERT(td->td_proc);
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
		nd.nl_flags |= NLC_EXEC;
		error = nlookup(&nd);
			error = kern_chroot(&nd.nl_nch);

sys_chroot_kernel(struct chroot_kernel_args *uap)
	struct thread *td = curthread;
	struct nlookupdata nd;
	struct nchandle *nch;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0);
	if ((vp = nch->ncp->nc_vp) == NULL) {
	if ((error = cache_vref(nch, nd.nl_cred, &vp)) != 0)
	kprintf("chroot_kernel: set new rootnch/rootvnode to %s\n", uap->path);
	vfs_cache_setroot(vp, cache_hold(nch));
/*
 * Common routine for chroot and chdir.  Given a locked, referenced vnode,
 * determine whether it is legal to chdir to the vnode.  The vnode's state
 * is not changed by this call.
 */
checkvp_chdir(struct vnode *vp, struct thread *td)
	if (vp->v_type != VDIR)
	error = VOP_EACCESS(vp, VEXEC, td->td_ucred);
kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct filedesc *fdp = p->p_fd;
	int type, indx, error = 0;

	if ((oflags & O_ACCMODE) == O_ACCMODE)
	flags = FFLAGS(oflags);
	error = falloc(lp, &nfp, NULL);
	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
	/*
	 * XXX p_dupfd is a real mess.  It allows a device to return a
	 * file descriptor to be duplicated rather than doing the open
	 */
	/*
	 * Call vn_open() to do the lookup and assign the vnode to the
	 * file pointer.  vn_open() does not change the ref count on fp
	 * and the vnode, on success, will be inherited by the file pointer
	 *
	 * Request a shared lock on the vnode if possible.
	 *
	 * Executable binaries can race VTEXT against O_RDWR opens, so
	 * use an exclusive lock for O_RDWR opens as well.
	 *
	 * NOTE: We need a flag to separate terminal vnode locking from
	 *	 parent locking.  O_CREAT needs parent locking, but O_TRUNC
	 *	 and O_RDWR only need to lock the terminal vnode exclusively.
	 */
	nd->nl_flags |= NLC_LOCKVP;
	if ((flags & (O_CREAT|O_TRUNC|O_RDWR)) == 0)
		nd->nl_flags |= NLC_SHAREDLOCK;

	error = vn_open(nd, fp, flags, cmode);

		/*
		 * handle special fdopen() case.  bleh.  dupfdopen() is
		 * responsible for dropping the old contents of ofiles[indx]
		 *
		 * Note that fsetfd() will add a ref to fp which represents
		 * the fd_files[] assignment.  We must still drop our
		 */
		if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
			if (fdalloc(p, 0, &indx) == 0) {
				error = dupfdopen(td, indx, lp->lwp_dupfd, flags, error);
					fdrop(fp);	/* our ref */
				fsetfd(fdp, NULL, indx);
		fdrop(fp);	/* our ref */
		if (error == ERESTART)
	/*
	 * ref the vnode for ourselves so it can't be ripped out from under
	 * us.  XXX need an ND flag to request that the vnode be returned
	 *
	 * Reserve a file descriptor but do not assign it until the open
	 */
	vp = (struct vnode *)fp->f_data;
	if ((error = fdalloc(p, 0, &indx)) != 0) {

	/*
	 * If no error occurs the vp will have been assigned to the file
	 */
	if (flags & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (flags & FNONBLOCK)
		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * lock request failed.  Clean up the reserved
			 */
			fsetfd(fdp, NULL, indx);
		atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
	/*
	 * Assert that all regular file vnodes were created with an object.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
	    ("open: regular file has no backing object after vn_open"));

	/*
	 * release our private reference, leaving the one associated with the
	 * descriptor table intact.
	 */
	if (oflags & O_CLOEXEC)
		fdp->fd_files[indx].fileflags |= UF_EXCLOSE;
	fsetfd(fdp, fp, indx);
/*
 * open_args(char *path, int flags, int mode)
 *
 * Check permissions, allocate an open file structure,
 * and call the device open routine if any.
 */
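/*
 * Illustrative usage (sketch, from userland):
 *
 *	int fd = open("/tmp/example", O_CREAT | O_RDWR | O_CLOEXEC, 0644);
 *
 * O_CLOEXEC is honored in kern_open() above by marking the new descriptor
 * UF_EXCLOSE.
 */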
sys_open(struct open_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_open(&nd, uap->flags,
			  uap->mode, &uap->sysmsg_result);

/*
 * openat_args(int fd, char *path, int flags, int mode)
 */
sys_openat(struct openat_args *uap)
	struct nlookupdata nd;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	error = kern_open(&nd, uap->flags, uap->mode,
			  &uap->sysmsg_result);
	nlookup_done_at(&nd, fp);

kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	vattr.va_rmajor = rmajor;
	vattr.va_rminor = rminor;

	switch (mode & S_IFMT) {
	case S_IFMT:	/* used by badsect to flag bad sectors */
		error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_BAD, 0);
		vattr.va_type = VBAD;
		error = priv_check(td, PRIV_VFS_MKNOD_DEV);
		vattr.va_type = VCHR;
		error = priv_check(td, PRIV_VFS_MKNOD_DEV);
		vattr.va_type = VBLK;
		error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_WHT, 0);
	case S_IFDIR:	/* special directories support for HAMMER */
		error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_DIR, 0);
		vattr.va_type = VDIR;

	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp,
				      nd->nl_cred, NAMEI_CREATE);
		error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp,
				   &vp, nd->nl_cred, &vattr);

/*
 * mknod_args(char *path, int mode, int dev)
 *
 * Create a special file.
 */
sys_mknod(struct mknod_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_mknod(&nd, uap->mode,
			   umajor(uap->dev), uminor(uap->dev));
/*
 * mknodat_args(int fd, char *path, mode_t mode, dev_t dev)
 *
 * Create a special file.  The path is relative to the directory associated
 * with fd.
 */
sys_mknodat(struct mknodat_args *uap)
	struct nlookupdata nd;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	error = kern_mknod(&nd, uap->mode,
			   umajor(uap->dev), uminor(uap->dev));
	nlookup_done_at(&nd, fp);

kern_mkfifo(struct nlookupdata *nd, int mode)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	vattr.va_type = VFIFO;
	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr);

/*
 * mkfifo_args(char *path, int mode)
 *
 * Create a named pipe.
 */
sys_mkfifo(struct mkfifo_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_mkfifo(&nd, uap->mode);
/*
 * mkfifoat_args(int fd, char *path, mode_t mode)
 *
 * Create a named pipe.  The path is relative to the directory associated
 * with fd.
 */
sys_mkfifoat(struct mkfifoat_args *uap)
	struct nlookupdata nd;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	error = kern_mkfifo(&nd, uap->mode);
	nlookup_done_at(&nd, fp);
static int hardlink_check_uid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
    &hardlink_check_uid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "users");

static int hardlink_check_gid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
    &hardlink_check_gid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "groups");
can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred)
	/*
	 * Shortcut if disabled
	 */
	if (hardlink_check_uid == 0 && hardlink_check_gid == 0)

	/*
	 * Privileged user can always hardlink
	 */
	if (priv_check_cred(cred, PRIV_VFS_LINK, 0) == 0)

	/*
	 * Otherwise only if the originating file is owned by the
	 * same user or group.  Note that any group is allowed if
	 * the file is owned by the caller.
	 */
	error = VOP_GETATTR(vp, &va);

	if (hardlink_check_uid) {
		if (cred->cr_uid != va.va_uid)
	if (hardlink_check_gid) {
		if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred))

kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
	struct thread *td = curthread;
	/*
	 * Lookup the source and obtain a locked vnode.
	 *
	 * You may only hardlink a file which you have write permission
	 * on or which you own.
	 *
	 * XXX relookup on vget failure / race ?
	 */
	nd->nl_flags |= NLC_WRITE | NLC_OWN | NLC_HLINK;
	if ((error = nlookup(nd)) != 0)
	vp = nd->nl_nch.ncp->nc_vp;
	KKASSERT(vp != NULL);
	if (vp->v_type == VDIR)
		return (EPERM);		/* POSIX */
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0)

	/*
	 * Unlock the source so we can lookup the target without deadlocking
	 * (XXX vp is locked already, possible other deadlock?).  The target
	 */
	KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
	nd->nl_flags &= ~NLC_NCPISLOCKED;
	cache_unlock(&nd->nl_nch);

	linknd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(linknd)) != 0) {
	if (linknd->nl_nch.ncp->nc_vp) {
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);

	/*
	 * Finally run the new API VOP.
	 */
	error = can_hardlink(vp, td, td->td_ucred);
		error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp,
				  vp, linknd->nl_cred);

/*
 * link_args(char *path, char *link)
 *
 * Make a hard file link.
 */
sys_link(struct link_args *uap)
	struct nlookupdata nd, linknd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
	error = kern_link(&nd, &linknd);
	nlookup_done(&linknd);

/*
 * linkat_args(int fd1, char *path1, int fd2, char *path2, int flags)
 *
 * Make a hard file link.  The path1 argument is relative to the directory
 * associated with fd1, and similarly the path2 argument is relative to
 * the directory associated with fd2.
 */
sys_linkat(struct linkat_args *uap)
	struct nlookupdata nd, linknd;
	struct file *fp1, *fp2;

	error = nlookup_init_at(&nd, &fp1, uap->fd1, uap->path1, UIO_USERSPACE,
				(uap->flags & AT_SYMLINK_FOLLOW) ? NLC_FOLLOW : 0);
	error = nlookup_init_at(&linknd, &fp2, uap->fd2,
				uap->path2, UIO_USERSPACE, 0);
	error = kern_link(&nd, &linknd);
	nlookup_done_at(&linknd, fp2);
	nlookup_done_at(&nd, fp1);

kern_symlink(struct nlookupdata *nd, char *path, int mode)
	nd->nl_flags |= NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if (nd->nl_nch.ncp->nc_vp)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	vattr.va_mode = mode;
	error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path);

/*
 * symlink(char *path, char *link)
 *
 * Make a symbolic link.
 */
sys_symlink(struct symlink_args *uap)
	struct thread *td = curthread;
	struct nlookupdata nd;

	path = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
	mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
	error = kern_symlink(&nd, path, mode);
	objcache_put(namei_oc, path);

/*
 * symlinkat_args(char *path1, int fd, char *path2)
 *
 * Make a symbolic link.  The path2 argument is relative to the directory
 * associated with fd.
 */
sys_symlinkat(struct symlinkat_args *uap)
	struct thread *td = curthread;
	struct nlookupdata nd;

	path1 = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path1, path1, MAXPATHLEN, NULL);
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path2,
	mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
	error = kern_symlink(&nd, path1, mode);
	nlookup_done_at(&nd, fp);
	objcache_put(namei_oc, path1);

/*
 * undelete_args(char *path)
 *
 * Delete a whiteout from the filesystem.
 */
sys_undelete(struct undelete_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	nd.nl_flags |= NLC_DELETE | NLC_REFDVP;
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred,

kern_unlink(struct nlookupdata *nd)
	nd->nl_flags |= NLC_DELETE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
	error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred);

/*
 * unlink_args(char *path)
 *
 * Delete a name from the filesystem.
 */
sys_unlink(struct unlink_args *uap)
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_unlink(&nd);

/*
 * unlinkat_args(int fd, char *path, int flags)
 *
 * Delete the file or directory entry pointed to by fd/path.
 */
sys_unlinkat(struct unlinkat_args *uap)
	struct nlookupdata nd;

	if (uap->flags & ~AT_REMOVEDIR)
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	if (uap->flags & AT_REMOVEDIR)
		error = kern_rmdir(&nd);
	else
		error = kern_unlink(&nd);
	nlookup_done_at(&nd, fp);

kern_lseek(int fd, off_t offset, int whence, off_t *res)
	struct thread *td = curthread;

	fp = holdfp(td, fd, -1);
	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

		spin_lock(&fp->f_spin);
		new_offset = fp->f_offset + offset;
		error = VOP_GETATTR(vp, &vattr);
		spin_lock(&fp->f_spin);
		new_offset = offset + vattr.va_size;
		new_offset = offset;
		spin_lock(&fp->f_spin);
		spin_lock(&fp->f_spin);
	/*
	 * Validate the seek position.  Negative offsets are not allowed
	 * for regular files or directories.
	 *
	 * Normally we would also not want to allow negative offsets for
	 * character and block-special devices.  However kvm addresses
	 * on 64 bit architectures might appear to be negative and must
	 * be allowed.
	 */
< 0 &&
2732 (vp
->v_type
== VREG
|| vp
->v_type
== VDIR
)) {
2735 fp
->f_offset
= new_offset
;
2738 *res
= fp
->f_offset
;
2739 spin_unlock(&fp
->f_spin
);
/*
 * lseek_args(int fd, int pad, off_t offset, int whence)
 *
 * Reposition read/write file offset.
 */
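/*
 * Illustrative usage (sketch): seeking to the end reports the file size,
 *
 *	off_t size = lseek(fd, 0, SEEK_END);
 *
 * the whence case that consults VOP_GETATTR()/va_size in kern_lseek()
 * above corresponds to SEEK_END.
 */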
sys_lseek(struct lseek_args *uap)
	error = kern_lseek(uap->fd, uap->offset, uap->whence,
			   &uap->sysmsg_offset);
/*
 * Check if current process can access given file.  amode is a bitmask of
 * *_OK access bits.  flags is a bitmask of AT_* flags.
 */
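/*
 * Illustrative usage (sketch, from userland):
 *
 *	access("/etc/passwd", R_OK | W_OK);
 *	faccessat(AT_FDCWD, "script.sh", X_OK, AT_EACCESS);
 *
 * AT_EACCESS requests the check against effective rather than real ids and
 * is the only flag accepted below.
 */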
2767 kern_access(struct nlookupdata
*nd
, int amode
, int flags
)
2772 if (flags
& ~AT_EACCESS
)
2774 nd
->nl_flags
|= NLC_SHAREDLOCK
;
2775 if ((error
= nlookup(nd
)) != 0)
2778 error
= cache_vget(&nd
->nl_nch
, nd
->nl_cred
, LK_SHARED
, &vp
);
2782 /* Flags == 0 means only check for existence. */
2791 if ((mode
& VWRITE
) == 0 ||
2792 (error
= vn_writechk(vp
, &nd
->nl_nch
)) == 0)
2793 error
= VOP_ACCESS_FLAGS(vp
, mode
, flags
, nd
->nl_cred
);
2796 * If the file handle is stale we have to re-resolve the
2797 * entry with the ncp held exclusively. This is a hack
2800 if (error
== ESTALE
) {
2802 cache_unlock(&nd
->nl_nch
);
2803 cache_lock(&nd
->nl_nch
);
2804 cache_setunresolved(&nd
->nl_nch
);
2805 error
= cache_resolve(&nd
->nl_nch
, nd
->nl_cred
);
2818 * access_args(char *path, int flags)
2820 * Check access permissions.
2823 sys_access(struct access_args
*uap
)
2825 struct nlookupdata nd
;
2828 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2830 error
= kern_access(&nd
, uap
->flags
, 0);
2837 * eaccess_args(char *path, int flags)
2839 * Check access permissions.
2842 sys_eaccess(struct eaccess_args
*uap
)
2844 struct nlookupdata nd
;
2847 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2849 error
= kern_access(&nd
, uap
->flags
, AT_EACCESS
);
2856 * faccessat_args(int fd, char *path, int amode, int flags)
2858 * Check access permissions.
2861 sys_faccessat(struct faccessat_args
*uap
)
2863 struct nlookupdata nd
;
2867 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
, UIO_USERSPACE
,
2870 error
= kern_access(&nd
, uap
->amode
, uap
->flags
);
2871 nlookup_done_at(&nd
, fp
);
int
kern_stat(struct nlookupdata *nd, struct stat *st)
{
	nd->nl_flags |= NLC_SHAREDLOCK;
	if ((error = nlookup(nd)) != 0)

	if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)

	if ((error = vget(vp, LK_SHARED)) != 0)

	error = vn_stat(vp, st, nd->nl_cred);

	/*
	 * If the file handle is stale we have to re-resolve the
	 * entry with the ncp held exclusively.  This is a hack
	 */
	if (error == ESTALE) {
		cache_unlock(&nd->nl_nch);
		cache_lock(&nd->nl_nch);
		cache_setunresolved(&nd->nl_nch);
		error = cache_resolve(&nd->nl_nch, nd->nl_cred);

/*
 * stat_args(char *path, struct stat *ub)
 *
 * Get file status; this version follows links.
 */
int
sys_stat(struct stat_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_stat(&nd, &st);
	error = copyout(&st, uap->ub, sizeof(*uap->ub));

/*
 * lstat_args(char *path, struct stat *ub)
 *
 * Get file status; this version does not follow links.
 */
int
sys_lstat(struct lstat_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_stat(&nd, &st);
	error = copyout(&st, uap->ub, sizeof(*uap->ub));

/*
 * fstatat_args(int fd, char *path, struct stat *sb, int flags)
 *
 * Get status of file pointed to by fd/path.
 */
int
sys_fstatat(struct fstatat_args *uap)
{
	struct nlookupdata nd;

	if (uap->flags & ~AT_SYMLINK_NOFOLLOW)

	flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW;
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path,
				UIO_USERSPACE, flags);
	error = kern_stat(&nd, &st);
	error = copyout(&st, uap->sb, sizeof(*uap->sb));
	nlookup_done_at(&nd, fp);
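/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	struct stat st;
 *
 *	// stat the link itself rather than what it points to
 *	if (fstatat(dirfd, "conf.lnk", &st, AT_SYMLINK_NOFOLLOW) == -1)
 *		err(1, "fstatat");
 */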
int
kern_pathconf(char *path, int name, int flags, register_t *sysmsg_regp)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, path, UIO_USERSPACE, flags);
	error = nlookup(&nd);
	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	error = VOP_PATHCONF(vp, name, sysmsg_regp);

/*
 * pathconf_args(char *path, int name)
 *
 * Get configurable pathname variables.
 */
int
sys_pathconf(struct pathconf_args *uap)
{
	return (kern_pathconf(uap->path, uap->name, NLC_FOLLOW,
			      &uap->sysmsg_reg));

/*
 * lpathconf_args(char *path, int name)
 *
 * Get configurable pathname variables, but don't follow symlinks.
 */
int
sys_lpathconf(struct lpathconf_args *uap)
{
	return (kern_pathconf(uap->path, uap->name, 0, &uap->sysmsg_reg));
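/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	long maxname = pathconf("/usr", _PC_NAME_MAX);
 *	if (maxname == -1)
 *		warn("pathconf");	// -1 with errno unchanged means "no limit"
 */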
/*
 * kern_readlink isn't properly split yet.  There is a copyin buried
 * in VOP_READLINK().
 */
int
kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
{
	struct thread *td = curthread;

	nd->nl_flags |= NLC_SHAREDLOCK;
	if ((error = nlookup(nd)) != 0)

	error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_SHARED, &vp);

	if (vp->v_type != VLNK) {

	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;

	auio.uio_resid = count;
	error = VOP_READLINK(vp, &auio, td->td_ucred);

	*res = count - auio.uio_resid;

/*
 * readlink_args(char *path, char *buf, int count)
 *
 * Return target name of a symbolic link.
 */
int
sys_readlink(struct readlink_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_readlink(&nd, uap->buf, uap->count,
			      &uap->sysmsg_result);

/*
 * readlinkat_args(int fd, char *path, char *buf, size_t bufsize)
 *
 * Return target name of a symbolic link.  The path is relative to the
 * directory associated with fd.
 */
int
sys_readlinkat(struct readlinkat_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	error = kern_readlink(&nd, uap->buf, uap->bufsize,
			      &uap->sysmsg_result);
	nlookup_done_at(&nd, fp);
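/*
 * Illustrative userland usage (not part of this file; a minimal sketch).
 * Note that readlink(2) does not NUL-terminate the buffer:
 *
 *	char target[PATH_MAX];
 *	ssize_t n = readlink("/tmp/link", target, sizeof(target) - 1);
 *	if (n == -1)
 *		err(1, "readlink");
 *	target[n] = '\0';
 */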
static int
setfflags(struct vnode *vp, int flags)
{
	struct thread *td = curthread;

	/*
	 * Prevent non-root users from setting flags on devices.  When
	 * a device is reused, users can retain ownership of the device
	 * if they are allowed to set flags and programs assume that
	 * chown can't fail when done as root.
	 */
	if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
	    ((error = priv_check_cred(td->td_ucred, PRIV_VFS_CHFLAGS_DEV, 0)) != 0))

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		vattr.va_flags = flags;
		error = VOP_SETATTR(vp, &vattr, td->td_ucred);

/*
 * chflags(char *path, int flags)
 *
 * Change flags of a file given a path name.
 */
int
sys_chflags(struct chflags_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	error = setfflags(vp, uap->flags);

/*
 * lchflags(char *path, int flags)
 *
 * Change flags of a file given a path name, but don't follow symlinks.
 */
int
sys_lchflags(struct lchflags_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	error = setfflags(vp, uap->flags);

/*
 * fchflags_args(int fd, int flags)
 *
 * Change flags of a file given a file descriptor.
 */
int
sys_fchflags(struct fchflags_args *uap)
{
	struct thread *td = curthread;

	if ((error = holdvnode(td, uap->fd, &fp)) != 0)

	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfflags((struct vnode *) fp->f_data, uap->flags);

/*
 * chflagsat_args(int fd, const char *path, int flags, int atflags)
 *
 * Change flags given a pathname relative to a filedescriptor.
 */
int
sys_chflagsat(struct chflagsat_args *uap)
{
	struct nlookupdata nd;

	if (uap->atflags & ~AT_SYMLINK_NOFOLLOW)

	lookupflags = (uap->atflags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, lookupflags);
	error = nlookup(&nd);
	error = ncp_writechk(&nd.nl_nch);
	error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done_at(&nd, fp);

	error = setfflags(vp, uap->flags);
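/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// mark a file "do not dump"; passing 0 would clear all flags
 *	if (chflags("/var/tmp/scratch.img", UF_NODUMP) == -1)
 *		warn("chflags");
 */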
static int
setfmode(struct vnode *vp, int mode)
{
	struct thread *td = curthread;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		vattr.va_mode = mode & ALLPERMS;
		error = VOP_SETATTR(vp, &vattr, td->td_ucred);
		cache_inval_wxok(vp);

int
kern_chmod(struct nlookupdata *nd, int mode)
{
	if ((error = nlookup(nd)) != 0)

	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)

	if ((error = ncp_writechk(&nd->nl_nch)) == 0)
		error = setfmode(vp, mode);

/*
 * chmod_args(char *path, int mode)
 *
 * Change mode of a file given path name.
 */
int
sys_chmod(struct chmod_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chmod(&nd, uap->mode);

/*
 * lchmod_args(char *path, int mode)
 *
 * Change mode of a file given path name (don't follow links.)
 */
int
sys_lchmod(struct lchmod_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_chmod(&nd, uap->mode);

/*
 * fchmod_args(int fd, int mode)
 *
 * Change mode of a file given a file descriptor.
 */
int
sys_fchmod(struct fchmod_args *uap)
{
	struct thread *td = curthread;

	if ((error = holdvnode(td, uap->fd, &fp)) != 0)

	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfmode((struct vnode *)fp->f_data, uap->mode);

/*
 * fchmodat_args(int fd, char *path, int mode, int flags)
 *
 * Change mode of a file pointed to by fd/path.
 */
int
sys_fchmodat(struct fchmodat_args *uap)
{
	struct nlookupdata nd;

	if (uap->flags & ~AT_SYMLINK_NOFOLLOW)

	flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW;
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path,
				UIO_USERSPACE, flags);
	error = kern_chmod(&nd, uap->mode);
	nlookup_done_at(&nd, fp);
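/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// 0640 relative to dirfd; pass AT_FDCWD to resolve from the cwd
 *	if (fchmodat(dirfd, "etc/app.conf", 0640, 0) == -1)
 *		err(1, "fchmodat");
 */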
static int
setfown(struct mount *mp, struct vnode *vp, uid_t uid, gid_t gid)
{
	struct thread *td = curthread;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		if ((error = VOP_GETATTR(vp, &vattr)) != 0)

		o_uid = vattr.va_uid;
		o_gid = vattr.va_gid;
		size = vattr.va_size;

		error = VOP_SETATTR(vp, &vattr, td->td_ucred);

		VFS_ACCOUNT(mp, o_uid, o_gid, -size);
		VFS_ACCOUNT(mp, uid, gid, size);

int
kern_chown(struct nlookupdata *nd, int uid, int gid)
{
	if ((error = nlookup(nd)) != 0)

	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)

	if ((error = ncp_writechk(&nd->nl_nch)) == 0)
		error = setfown(nd->nl_nch.mount, vp, uid, gid);

/*
 * chown(char *path, int uid, int gid)
 *
 * Set ownership given a path name.
 */
int
sys_chown(struct chown_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_chown(&nd, uap->uid, uap->gid);

/*
 * lchown_args(char *path, int uid, int gid)
 *
 * Set ownership given a path name, do not cross symlinks.
 */
int
sys_lchown(struct lchown_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_chown(&nd, uap->uid, uap->gid);

/*
 * fchown_args(int fd, int uid, int gid)
 *
 * Set ownership given a file descriptor.
 */
int
sys_fchown(struct fchown_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((error = holdvnode(td, uap->fd, &fp)) != 0)

	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	error = setfown(p->p_fd->fd_ncdir.mount,
			(struct vnode *)fp->f_data, uap->uid, uap->gid);

/*
 * fchownat(int fd, char *path, int uid, int gid, int flags)
 *
 * Set ownership of file pointed to by fd/path.
 */
int
sys_fchownat(struct fchownat_args *uap)
{
	struct nlookupdata nd;

	if (uap->flags & ~AT_SYMLINK_NOFOLLOW)

	flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW;
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path,
				UIO_USERSPACE, flags);
	error = kern_chown(&nd, uap->uid, uap->gid);
	nlookup_done_at(&nd, fp);
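/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// change the owner of the link itself, not its target
 *	if (fchownat(AT_FDCWD, "current.log", 1001, -1,
 *	    AT_SYMLINK_NOFOLLOW) == -1)
 *		warn("fchownat");	// gid of -1 leaves the group untouched
 */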
static int
getutimes(struct timeval *tvp, struct timespec *tsp)
{
	struct timeval tv[2];

		TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);

		if ((error = itimerfix(tvp)) != 0)

		TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
		TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);

static int
getutimens(const struct timespec *ts, struct timespec *newts, int *nullflag)
{
	struct timespec tsnow;

	if (newts[0].tv_nsec == UTIME_OMIT && newts[1].tv_nsec == UTIME_OMIT)

	if (newts[0].tv_nsec == UTIME_NOW && newts[1].tv_nsec == UTIME_NOW)

	if (newts[0].tv_nsec == UTIME_OMIT)
		newts[0].tv_sec = VNOVAL;
	else if (newts[0].tv_nsec == UTIME_NOW)

	else if ((error = itimespecfix(&newts[0])) != 0)

	if (newts[1].tv_nsec == UTIME_OMIT)
		newts[1].tv_sec = VNOVAL;
	else if (newts[1].tv_nsec == UTIME_NOW)

	else if ((error = itimespecfix(&newts[1])) != 0)

static int
setutimes(struct vnode *vp, struct vattr *vattr,
	  const struct timespec *ts, int nullflag)
{
	struct thread *td = curthread;

	vattr->va_atime = ts[0];
	vattr->va_mtime = ts[1];

	vattr->va_vaflags |= VA_UTIMES_NULL;
	error = VOP_SETATTR(vp, vattr, td->td_ucred);

int
kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
{
	struct timespec ts[2];

	if ((error = getutimes(tptr, ts)) != 0)

	error = kern_utimensat(nd, tptr ? ts : NULL, 0);

/*
 * utimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
int
sys_utimes(struct utimes_args *uap)
{
	struct timeval tv[2];
	struct nlookupdata nd;

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_utimes(&nd, uap->tptr ? tv : NULL);

/*
 * lutimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
int
sys_lutimes(struct lutimes_args *uap)
{
	struct timeval tv[2];
	struct nlookupdata nd;

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_utimes(&nd, uap->tptr ? tv : NULL);

/*
 * Set utimes on a file descriptor.  The creds used to open the
 * file are used to determine whether the operation is allowed
 */
int
kern_futimens(int fd, struct timespec *ts)
{
	struct thread *td = curthread;
	struct timespec newts[2];

	error = getutimens(ts, newts, &nullflag);

	if ((error = holdvnode(td, fd, &fp)) != 0)

	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);

	error = vget(vp, LK_EXCLUSIVE);

	error = VOP_GETATTR(vp, &vattr);

	error = naccess_va(&vattr, NLC_OWN | NLC_WRITE,

	error = setutimes(vp, &vattr, newts, nullflag);

/*
 * futimens_args(int fd, struct timespec *ts)
 *
 * Set the access and modification times of a file.
 */
int
sys_futimens(struct futimens_args *uap)
{
	struct timespec ts[2];

	error = copyin(uap->ts, ts, sizeof(ts));

	error = kern_futimens(uap->fd, uap->ts ? ts : NULL);

int
kern_futimes(int fd, struct timeval *tptr)
{
	struct timespec ts[2];

	if ((error = getutimes(tptr, ts)) != 0)

	error = kern_futimens(fd, tptr ? ts : NULL);

/*
 * futimes_args(int fd, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
int
sys_futimes(struct futimes_args *uap)
{
	struct timeval tv[2];

	error = copyin(uap->tptr, tv, sizeof(tv));

	error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);

int
kern_utimensat(struct nlookupdata *nd, const struct timespec *ts, int flags)
{
	struct timespec newts[2];

	if (flags & ~AT_SYMLINK_NOFOLLOW)

	error = getutimens(ts, newts, &nullflag);

	nd->nl_flags |= NLC_OWN | NLC_WRITE;
	if ((error = nlookup(nd)) != 0)

	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)

	if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) {
		error = vget(vp, LK_EXCLUSIVE);

		error = setutimes(vp, &vattr, newts, nullflag);

/*
 * utimensat_args(int fd, const char *path, const struct timespec *ts, int flags);
 *
 * Set file access and modification times of a file.
 */
int
sys_utimensat(struct utimensat_args *uap)
{
	struct timespec ts[2];
	struct nlookupdata nd;

	error = copyin(uap->ts, ts, sizeof(ts));

	flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW;
	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path,
				UIO_USERSPACE, flags);
	error = kern_utimensat(&nd, uap->ts ? ts : NULL, uap->flags);
	nlookup_done_at(&nd, fp);
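/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	struct timespec times[2];
 *
 *	times[0].tv_nsec = UTIME_OMIT;	// leave atime alone
 *	times[1].tv_nsec = UTIME_NOW;	// set mtime to "now"
 *	if (utimensat(AT_FDCWD, "build.stamp", times, 0) == -1)
 *		err(1, "utimensat");
 */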
int
kern_truncate(struct nlookupdata *nd, off_t length)
{
	uint64_t old_size = 0;

	nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE;
	if ((error = nlookup(nd)) != 0)

	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);

	if (vp->v_type == VDIR) {

	if (vfs_quota_enabled) {
		error = VOP_GETATTR(vp, &vattr);
		KASSERT(error == 0, ("kern_truncate(): VOP_GETATTR didn't return 0"));

		old_size = vattr.va_size;

	if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) {
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, nd->nl_cred);
		VFS_ACCOUNT(nd->nl_nch.mount, uid, gid, length - old_size);

/*
 * truncate(char *path, int pad, off_t length)
 *
 * Truncate a file given its path name.
 */
int
sys_truncate(struct truncate_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = kern_truncate(&nd, uap->length);

int
kern_ftruncate(int fd, off_t length)
{
	struct thread *td = curthread;
	uint64_t old_size = 0;

	if ((error = holdvnode(td, fd, &fp)) != 0)

	if (fp->f_nchandle.ncp) {
		error = ncp_writechk(&fp->f_nchandle);

	if ((fp->f_flag & FWRITE) == 0) {

	if (fp->f_flag & FAPPENDONLY) {	/* inode was set append-only */

	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {

	if (vfs_quota_enabled) {
		error = VOP_GETATTR(vp, &vattr);
		KASSERT(error == 0, ("kern_ftruncate(): VOP_GETATTR didn't return 0"));

		old_size = vattr.va_size;

	if ((error = vn_writechk(vp, NULL)) == 0) {
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);

		VFS_ACCOUNT(mp, uid, gid, length - old_size);

/*
 * ftruncate_args(int fd, int pad, off_t length)
 *
 * Truncate a file given a file descriptor.
 */
int
sys_ftruncate(struct ftruncate_args *uap)
{
	error = kern_ftruncate(uap->fd, uap->length);
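/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// discard the previous contents of an already-open, writable file
 *	if (ftruncate(fd, 0) == -1)
 *		err(1, "ftruncate");
 */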
/*
 * Sync an open file.
 */
int
sys_fsync(struct fsync_args *uap)
{
	struct thread *td = curthread;

	if ((error = holdvnode(td, uap->fd, &fp)) != 0)

	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((obj = vp->v_object) != NULL) {
		if (vp->v_mount == NULL ||
		    (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC) == 0) {
			vm_object_page_clean(obj, 0, 0, 0);

	error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL);
	if (error == 0 && vp->v_mount)
		error = buf_fsync(vp);
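/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// make sure the data just written has reached stable storage
 *	if (write(fd, buf, len) == -1 || fsync(fd) == -1)
 *		err(1, "write/fsync");
 */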
int
kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
{
	struct nchandle fnchd;
	struct nchandle tnchd;
	struct namecache *ncp;

	fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC;
	if ((error = nlookup(fromnd)) != 0)

	if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL)

	fnchd.mount = fromnd->nl_nch.mount;

	/*
	 * unlock the source nch so we can lookup the target nch without
	 * deadlocking.  The target may or may not exist so we do not check
	 * for a target vp like kern_mkdir() and other creation functions do.
	 *
	 * The source and target directories are ref'd and rechecked after
	 * everything is relocked to determine if the source or target file
	 */
	KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
	fromnd->nl_flags &= ~NLC_NCPISLOCKED;

	fncp_gen = fromnd->nl_nch.ncp->nc_generation;

	cache_unlock(&fromnd->nl_nch);

	tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP;
	if ((error = nlookup(tond)) != 0) {

	tncp_gen = tond->nl_nch.ncp->nc_generation;

	if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) {

	tnchd.mount = tond->nl_nch.mount;

	/*
	 * If the source and target are the same there is nothing to do
	 */
	if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) {

	/*
	 * Mount points cannot be renamed or overwritten
	 */
	if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) &

	/*
	 * Relock the source ncp.  cache_relock() will deal with any
	 * deadlocks against the already-locked tond and will also
	 * make sure both are resolved.
	 *
	 * NOTE AFTER RELOCKING: The source or target ncp may have become
	 * invalid while they were unlocked, nc_vp and nc_mount could
	 * be NULL.
	 */
	cache_relock(&fromnd->nl_nch, fromnd->nl_cred,
		     &tond->nl_nch, tond->nl_cred);
	fromnd->nl_flags |= NLC_NCPISLOCKED;

	/*
	 * If the namecache generation changed for either fromnd or tond,
	 * we must retry.
	 */
	if (fromnd->nl_nch.ncp->nc_generation != fncp_gen ||
	    tond->nl_nch.ncp->nc_generation != tncp_gen) {
		kprintf("kern_rename: retry due to gen on: "
			"\"%s\" -> \"%s\"\n",
			fromnd->nl_nch.ncp->nc_name,
			tond->nl_nch.ncp->nc_name);

	/*
	 * If either fromnd or tond are marked destroyed a ripout occurred
	 * out from under us and we must retry.
	 */
	if ((fromnd->nl_nch.ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)) ||
	    fromnd->nl_nch.ncp->nc_vp == NULL ||
	    (tond->nl_nch.ncp->nc_flag & NCF_DESTROYED)) {
		kprintf("kern_rename: retry due to ripout on: "
			"\"%s\" -> \"%s\"\n",
			fromnd->nl_nch.ncp->nc_name,
			tond->nl_nch.ncp->nc_name);

	/*
	 * Make sure the parent directories linkages are the same.
	 * XXX shouldn't be needed any more w/ generation check above.
	 */
	if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent ||
	    tnchd.ncp != tond->nl_nch.ncp->nc_parent) {

	/*
	 * Both the source and target must be within the same filesystem and
	 * in the same filesystem as their parent directories within the
	 * namecache topology.
	 *
	 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
	 */
	if (mp != tnchd.mount || mp != fromnd->nl_nch.mount ||
	    mp != tond->nl_nch.mount) {

	/*
	 * Make sure the mount point is writable
	 */
	if ((error = ncp_writechk(&tond->nl_nch)) != 0) {

	/*
	 * If the target exists and either the source or target is a directory,
	 * then both must be directories.
	 *
	 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might
	 * have become NULL.
	 */
	if (tond->nl_nch.ncp->nc_vp) {
		if (fromnd->nl_nch.ncp->nc_vp == NULL) {

		} else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) {
			if (tond->nl_nch.ncp->nc_vp->v_type != VDIR)

		} else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) {

	/*
	 * You cannot rename a source into itself or a subdirectory of itself.
	 * We check this by traversing the target directory upwards looking
	 * for a match against the source.
	 */
	for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) {
		if (fromnd->nl_nch.ncp == ncp) {

	/*
	 * Even though the namespaces are different, they may still represent
	 * hardlinks to the same file.  The filesystem might have a hard time
	 * with this so we issue a NREMOVE of the source instead of a NRENAME
	 * when we detect the situation.
	 */
	fdvp = fromnd->nl_dvp;
	tdvp = tond->nl_dvp;
	if (fdvp == NULL || tdvp == NULL) {

	} else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) {
		error = VOP_NREMOVE(&fromnd->nl_nch, fdvp,

		error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch,
				    fdvp, tdvp, tond->nl_cred);

/*
 * rename_args(char *from, char *to)
 *
 * Rename files.  Source and destination must either both be directories,
 * or both not be directories.  If target is a directory, it must be empty.
 */
int
sys_rename(struct rename_args *uap)
{
	struct nlookupdata fromnd, tond;

	error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);

	error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);

	error = kern_rename(&fromnd, &tond);
	nlookup_done(&tond);

	nlookup_done(&fromnd);
	} while (error == EAGAIN);

/*
 * renameat_args(int oldfd, char *old, int newfd, char *new)
 *
 * Rename files using paths relative to the directories associated with
 * oldfd and newfd.  Source and destination must either both be directories,
 * or both not be directories.  If target is a directory, it must be empty.
 */
int
sys_renameat(struct renameat_args *uap)
{
	struct nlookupdata oldnd, newnd;
	struct file *oldfp, *newfp;

	error = nlookup_init_at(&oldnd, &oldfp,
				uap->oldfd, uap->old,

	error = nlookup_init_at(&newnd, &newfp,
				uap->newfd, uap->new,

	error = kern_rename(&oldnd, &newnd);
	nlookup_done_at(&newnd, newfp);

	nlookup_done_at(&oldnd, oldfp);
	} while (error == EAGAIN);
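/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	// atomically replace "config" with the freshly written temp file
 *	if (renameat(dirfd, "config.tmp", dirfd, "config") == -1)
 *		err(1, "renameat");
 */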
int
kern_mkdir(struct nlookupdata *nd, int mode)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)

	if (nd->nl_nch.ncp->nc_vp)

	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	vattr.va_type = VDIR;
	vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;

	error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, td->td_ucred, &vattr);

/*
 * mkdir_args(char *path, int mode)
 *
 * Make a directory file.
 */
int
sys_mkdir(struct mkdir_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_mkdir(&nd, uap->mode);

/*
 * mkdirat_args(int fd, char *path, mode_t mode)
 *
 * Make a directory file.  The path is relative to the directory associated
 * with fd.
 */
int
sys_mkdirat(struct mkdirat_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0);
	error = kern_mkdir(&nd, uap->mode);
	nlookup_done_at(&nd, fp);

int
kern_rmdir(struct nlookupdata *nd)
{
	nd->nl_flags |= NLC_DELETE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)

	/*
	 * Do not allow directories representing mount points to be
	 * deleted, even if empty.  Check write perms on mount point
	 * in case the vnode is aliased (aka nullfs).
	 */
	if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT))

	if ((error = ncp_writechk(&nd->nl_nch)) != 0)

	error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred);

/*
 * rmdir_args(char *path)
 *
 * Remove a directory file.
 */
int
sys_rmdir(struct rmdir_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	error = kern_rmdir(&nd);
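/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	if (mkdirat(dirfd, "spool", 0755) == -1 && errno != EEXIST)
 *		err(1, "mkdirat");
 *
 *	if (rmdir("spool/old") == -1)	// fails with ENOTEMPTY if not empty
 *		warn("rmdir");
 */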
int
kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
		   enum uio_seg direction)
{
	struct thread *td = curthread;

	if ((error = holdvnode(td, fd, &fp)) != 0)

	if ((fp->f_flag & FREAD) == 0) {

	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VDIR) {

	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = direction;

	auio.uio_resid = count;
	loff = auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
	fp->f_offset = auio.uio_offset;

	/*
	 * WARNING!  *basep may not be wide enough to accommodate the
	 * seek offset.  XXX should we hack this to return the upper 32 bits
	 * for offsets greater than 4G?
	 */
	*basep = (long)loff;

	*res = count - auio.uio_resid;

/*
 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
 *
 * Read a block of directory entries in a file system independent format.
 */
int
sys_getdirentries(struct getdirentries_args *uap)
{
	error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
				   &uap->sysmsg_result, UIO_USERSPACE);

	if (error == 0 && uap->basep)
		error = copyout(&base, uap->basep, sizeof(*uap->basep));

/*
 * getdents_args(int fd, char *buf, size_t count)
 */
int
sys_getdents(struct getdents_args *uap)
{
	error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
				   &uap->sysmsg_result, UIO_USERSPACE);

/*
 * Set the mode mask for creation of filesystem nodes.
 *
 * umask(int newmask)
 */
int
sys_umask(struct umask_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	uap->sysmsg_result = fdp->fd_cmask;
	fdp->fd_cmask = uap->newmask & ALLPERMS;
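/*
 * Illustrative userland usage (not part of this file; a minimal sketch):
 *
 *	mode_t old = umask(022);	// subsequent creates drop group/other write
 *
 *	umask(old);			// restore the previous mask
 */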
/*
 * revoke(char *path)
 *
 * Void all references to file by ripping underlying filesystem
 * away from vnode.
 */
int
sys_revoke(struct revoke_args *uap)
{
	struct nlookupdata nd;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	cred = crhold(nd.nl_cred);

	error = VOP_GETATTR(vp, &vattr);
	if (error == 0 && cred->cr_uid != vattr.va_uid)
		error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0);
	if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) {

		error = vrevoke(vp, cred);
	} else if (error == 0) {
		error = vrevoke(vp, cred);
/*
 * getfh_args(char *fname, fhandle_t *fhp)
 *
 * Get (NFS) file handle
 *
 * NOTE: We use the fsid of the covering mount, even if it is a nullfs
 * mount.  This allows nullfs mounts to be explicitly exported.
 *
 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe.
 *
 *	    nullfs mounts of subdirectories are not safe.  That is, it will
 *	    work, but you do not really have protection against access to
 *	    the related parent directories.
 */
int
sys_getfh(struct getfh_args *uap)
{
	struct thread *td = curthread;
	struct nlookupdata nd;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)) != 0)

	error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	mp = nd.nl_nch.mount;

	bzero(&fh, sizeof(fh));
	fh.fh_fsid = mp->mnt_stat.f_fsid;
	error = VFS_VPTOFH(vp, &fh.fh_fid);

	error = copyout(&fh, uap->fhp, sizeof(fh));

/*
 * fhopen_args(const struct fhandle *u_fhp, int flags)
 *
 * syscall for the rpc.lockd to use to translate a NFS file handle into
 * an open descriptor.
 *
 * warning: do not remove the priv_check() call or this becomes one giant
 * security hole.
 */
int
sys_fhopen(struct fhopen_args *uap)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;

	struct vattr *vap = &vat;

	int fmode, mode, error = 0, type;

	/*
	 * Must be super user
	 */
	error = priv_check(td, PRIV_ROOT);

	fmode = FFLAGS(uap->flags);

	/*
	 * Why not allow a non-read/write open for our lockd?
	 */
	if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))

	error = copyin(uap->u_fhp, &fhp, sizeof(fhp));

	/*
	 * Find the mount point
	 */
	mp = vfs_getvfs(&fhp.fh_fsid);

	/* now give me my vnode, it gets returned to me locked */
	error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp);

	/*
	 * from now on we have to make sure not
	 * to forget about the vnode
	 * any error that causes an abort must vput(vp)
	 * just set error = err and 'goto bad;'.
	 */
	if (vp->v_type == VLNK) {

	if (vp->v_type == VSOCK) {

	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {

		error = vn_writechk(vp, NULL);

	error = VOP_ACCESS(vp, mode, td->td_ucred);

	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */

		error = VOP_SETATTR(vp, vap, td->td_ucred);

	/*
	 * VOP_OPEN needs the file pointer so it can potentially override
	 *
	 * WARNING! no f_nchandle will be associated when fhopen()ing a
	 */
	if ((error = falloc(td->td_lwp, &nfp, &indx)) != 0)

	error = VOP_OPEN(vp, fmode, td->td_ucred, fp);

		/*
		 * setting f_ops this way prevents VOP_CLOSE from being
		 * called or fdrop() releasing the vp from v_data.  Since
		 * the VOP_OPEN failed we don't want to VOP_CLOSE.
		 */
		fp->f_ops = &badfileops;

	/*
	 * The fp is given its own reference, we still have our ref and lock.
	 *
	 * Assert that all regular files must be created with a VM object.
	 */
	if (vp->v_type == VREG && vp->v_object == NULL) {
		kprintf("fhopen: regular file did not "
			"have VM object: %p\n",

	/*
	 * The open was successful.  Handle any locking requirements.
	 */
	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;

		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (fmode & FNONBLOCK)

		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK,

			/*
			 * release our private reference.
			 */
			fsetfd(fdp, NULL, indx);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */

	/*
	 * Clean up.  Associate the file pointer with the previously
	 * reserved descriptor and return it.
	 */
	if (uap->flags & O_CLOEXEC)
		fdp->fd_files[indx].fileflags |= UF_EXCLOSE;
	fsetfd(fdp, fp, indx);

	uap->sysmsg_result = indx;

	fsetfd(fdp, NULL, indx);
/*
 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
 */
int
sys_fhstat(struct fhstat_args *uap)
{
	struct thread *td = curthread;

	/*
	 * Must be super user
	 */
	error = priv_check(td, PRIV_ROOT);

	error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) == 0) {
		error = vn_stat(vp, &sb, td->td_ucred);

	error = copyout(&sb, uap->sb, sizeof(sb));

/*
 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
 */
int
sys_fhstatfs(struct fhstatfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	char *fullpath, *freepath;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)))

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) {

	if (p != NULL && !chroot_visible_mnt(mp, p)) {

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) != 0)

	if ((error = VFS_STATFS(mp, sp, td->td_ucred)) != 0)

	error = mount_path(p, mp, &fullpath, &freepath);

	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	if (priv_check(td, PRIV_ROOT)) {
		bcopy(sp, &sb, sizeof(sb));
		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;

	error = copyout(sp, uap->buf, sizeof(*sp));

/*
 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf)
 */
int
sys_fhstatvfs(struct fhstatvfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)))

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) {

	if (p != NULL && !chroot_visible_mnt(mp, p)) {

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))

	sp = &mp->mnt_vstat;

	if ((error = VFS_STATVFS(mp, sp, td->td_ucred)) != 0)

	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;
	error = copyout(sp, uap->buf, sizeof(*sp));
/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), and attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed to be copied into sysspace by the
 * syscall in the future, once issues with various consumers of the
 * attribute code have raised their hands.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;

	char attrname[EXTATTR_MAXNAMELEN];

	if (error == 0 && uap->filename) {
		error = nlookup_init(&nd, uap->filename, UIO_USERSPACE,

		error = nlookup(&nd);

		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);

	if (error == 0 && uap->attrname) {
		error = copyinstr(uap->attrname, attrname, EXTATTR_MAXNAMELEN,

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = ncp_writechk(&nd.nl_nch);

	error = VFS_EXTATTRCTL(nd.nl_nch.mount, uap->cmd, vp,
			       uap->attrname, nd.nl_cred);

/*
 * Syscall to set a named extended attribute on a file or directory.
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = ncp_writechk(&nd.nl_nch);

	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	error = VOP_SETEXTATTR(vp, uap->attrnamespace, attrname,

/*
 * Syscall to get a named extended attribute on a file or directory.
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_SHARED, &vp);

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	error = VOP_GETEXTATTR(vp, uap->attrnamespace, attrname,

	uap->sysmsg_result = uap->nbytes - auio.uio_resid;

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);

	error = nlookup(&nd);

	error = ncp_writechk(&nd.nl_nch);

	error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);

	error = VOP_SETEXTATTR(vp, uap->attrnamespace,
			       attrname, NULL, nd.nl_cred);
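/*
 * Illustrative userland usage (not part of this file; a minimal sketch
 * using the extattr(2) family in the user namespace):
 *
 *	const char note[] = "archived 2024";
 *
 *	if (extattr_set_file("report.pdf", EXTATTR_NAMESPACE_USER,
 *	    "comment", note, sizeof(note)) == -1)
 *		warn("extattr_set_file");
 */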
/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;

	if (nch.mount == p->p_fd->fd_nrdir.mount &&
	    nch.ncp == p->p_fd->fd_nrdir.ncp) {

	if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
		nch = nch.mount->mnt_ncmounton;

	nch.ncp = nch.ncp->nc_parent;

	/*
	 * If the mount point is not visible to the process, but the
	 * process root is in a subdirectory of the mount, return
	 */
	if (p->p_fd->fd_nrdir.mount == mp)