/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.135 2008/11/11 00:55:49 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/dirent.h>
#include <sys/extattr.h>
#include <sys/spinlock.h>
#include <sys/kern_syscall.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <machine/limits.h>
#include <machine/stdarg.h>

#include <vfs/union/union.h>
static void mount_warning(struct mount *mp, const char *ctl, ...);
static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb);
static int checkvp_chdir (struct vnode *vn, struct thread *td);
static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch);
static int chroot_refuse_vdir_fds (struct filedesc *fdp);
static int chroot_visible_mnt(struct mount *mp, struct proc *p);
static int getutimes (const struct timeval *, struct timespec *);
static int setfown (struct vnode *, uid_t, gid_t);
static int setfmode (struct vnode *, int);
static int setfflags (struct vnode *, int);
static int setutimes (struct vnode *, struct vattr *,
			const struct timespec *, int);
static int	usermount = 0;	/* if 1, non-root can mount fs. */

int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);

SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
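/*
 * For example, "sysctl vfs.usermount=1" at runtime allows non-root users
 * to mount filesystems; sys_mount() below still silently forces
 * MNT_NOSUID and MNT_NODEV onto such mounts.
 */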
/*
 * Virtual File System System Calls
 */

/*
 * Mount a file system.
 *
 * mount_args(char *type, char *path, int flags, caddr_t data)
 */
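/*
 * A rough userland sketch (not part of this file): the syscall is
 * typically invoked as
 *
 *	mount("ufs", "/mnt", MNT_RDONLY | MNT_NOSUID, &fs_specific_args);
 *
 * where the last argument is the filesystem-specific structure that the
 * individual VFS copies in from uap->data during VFS_MOUNT().  The
 * filesystem type and argument structure here are only illustrative.
 */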
sys_mount(struct mount_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vfsconf *vfsp;
	int error, flag = 0, flag2 = 0;
	struct nlookupdata nd;
	char fstypename[MFSNAMELEN];
	struct ucred *cred = p->p_ucred;

	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))

	/*
	 * Do not allow NFS export by non-root users.
	 */
	if (uap->flags & MNT_EXPORTED) {
		error = priv_check(td, PRIV_ROOT);

	/*
	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
	 */
	if (priv_check(td, PRIV_ROOT))
		uap->flags |= MNT_NOSUID | MNT_NODEV;

	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if ((error = nlookup(&nd)) == 0) {
		if (nd.nl_nch.ncp->nc_vp == NULL)
	/*
	 * Extract the locked+refd ncp and cleanup the nd structure
	 */
	cache_zero(&nd.nl_nch);

	if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && cache_findmount(&nch))

	/*
	 * now we have the locked ref'd nch and unreferenced vnode.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {

	/*
	 * Now we have an unlocked ref'd nch and a locked ref'd vp
	 */
	if (uap->flags & MNT_UPDATE) {
		if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) {
		flag2 = mp->mnt_kern_flag;
		/*
		 * We only allow the filesystem to be reloaded if it
		 * is currently mounted read-only.
		 */
		if ((uap->flags & MNT_RELOAD) &&
		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
			return (EOPNOTSUPP);	/* Needs translation */
		/*
		 * Only root, or the user that did the original mount is
		 * permitted to update it.
		 */
		if (mp->mnt_stat.f_owner != cred->cr_uid &&
		    (error = priv_check(td, PRIV_ROOT))) {
		if (vfs_busy(mp, LK_NOWAIT)) {
		if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
		vp->v_flag |= VMOUNT;
		    uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);

	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	if ((error = VOP_GETATTR(vp, &va)) ||
	    (va.va_uid != cred->cr_uid && (error = priv_check(td, PRIV_ROOT)))) {
	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
	if (vp->v_type != VDIR) {
	if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) {
	if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {
	vfsp = vfsconf_find_by_name(fstypename);
		/* Only load modules for root (very important!) */
		if ((error = priv_check(td, PRIV_ROOT)) != 0) {
		error = linker_load_file(fstypename, &lf);
		if (error || lf == NULL) {
		/* lookup again, see if the VFS was loaded */
		vfsp = vfsconf_find_by_name(fstypename);
			linker_file_unload(lf);
	if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
	vp->v_flag |= VMOUNT;
	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	mp->mnt_iosize_max = DFLTPHYS;

	/*
	 * Set the mount level flags.
	 */
	if (uap->flags & MNT_RDONLY)
		mp->mnt_flag |= MNT_RDONLY;
	else if (mp->mnt_flag & MNT_RDONLY)
		mp->mnt_kern_flag |= MNTK_WANTRDWR;
	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
	    MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
	mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
	    MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 */
	error = VFS_MOUNT(mp, uap->path, uap->data, cred);
	if (mp->mnt_flag & MNT_UPDATE) {
		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
			mp->mnt_flag &= ~MNT_RDONLY;
		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
			mp->mnt_kern_flag = flag2;
		vp->v_flag &= ~VMOUNT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 */
	if (mp->mnt_ncmountpt.ncp == NULL) {
		/*
		 * allocate, then unlock, but leave the ref intact
		 */
		cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
		cache_unlock(&mp->mnt_ncmountpt);
	mp->mnt_ncmounton = nch;		/* inherits ref */
	nch.ncp->nc_flag |= NCF_ISMOUNTPT;
	/* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
	vp->v_flag &= ~VMOUNT;
	mountlist_insert(mp, MNTINS_LAST);
	checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt);
	error = vfs_allocate_syncvnode(mp);
	error = VFS_START(mp, 0);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
	vp->v_flag &= ~VMOUNT;
	mp->mnt_vfc->vfc_refcount--;
/*
 * Scan all active processes to see if any of them have a current
 * or root directory onto which the new filesystem has just been
 * mounted. If so, replace them with the new mount point.
 *
 * The passed ncp is ref'd and locked (from the mount code) and
 * must be associated with the vnode representing the root of the
 */
struct checkdirs_info {
	struct nchandle old_nch;
	struct nchandle new_nch;
	struct vnode *old_vp;
	struct vnode *new_vp;
};

static int checkdirs_callback(struct proc *p, void *data);

checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
	struct checkdirs_info info;

	/*
	 * If the old mount point's vnode has a usecount of 1, it is not
	 * being held as a descriptor anywhere.
	 */
	olddp = old_nch->ncp->nc_vp;
	if (olddp == NULL || olddp->v_sysref.refcnt == 1)

	/*
	 * Force the root vnode of the new mount point to be resolved
	 * so we can update any matching processes.
	 */
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	cache_setunresolved(new_nch);
	cache_setvp(new_nch, newdp);

	/*
	 * Special handling of the root node
	 */
	if (rootvnode == olddp) {
		vfs_cache_setroot(newdp, cache_hold(new_nch));

	/*
	 * Pass newdp separately so the callback does not have to access
	 * it via new_nch->ncp->nc_vp.
	 */
	info.old_nch = *old_nch;
	info.new_nch = *new_nch;
	allproc_scan(checkdirs_callback, &info);
/*
 * NOTE: callback is not MP safe because the scanned process's filedesc
 * structure can be ripped out from under us, among other things.
 */
checkdirs_callback(struct proc *p, void *data)
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock_wr(&fdp->fd_spin);
		if (fdp->fd_ncdir.mount == info->old_nch.mount &&
		    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
			vprele1 = fdp->fd_cdir;
			fdp->fd_cdir = info->new_vp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->new_nch, &fdp->fd_ncdir);
		if (fdp->fd_nrdir.mount == info->old_nch.mount &&
		    fdp->fd_nrdir.ncp == info->old_nch.ncp) {
			vprele2 = fdp->fd_rdir;
			fdp->fd_rdir = info->new_vp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->new_nch, &fdp->fd_nrdir);
		spin_unlock_wr(&fdp->fd_spin);
	cache_drop(&ncdrop1);
	cache_drop(&ncdrop2);
/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not special file (as before).
 *
 * umount_args(char *path, int flags)
 */
sys_unmount(struct unmount_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp = NULL;
	struct nlookupdata nd;

	if (p->p_ucred->cr_prison != NULL)
	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
	    (error = priv_check(td, PRIV_ROOT)))

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {

	return (dounmount(mp, uap->flags));
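/*
 * For example, a forced unmount requested from userland (typically
 * umount -f) arrives here with MNT_FORCE set in uap->flags and is
 * handed straight to dounmount(), which uses that flag to bypass the
 * namecache and reference-count checks below.
 */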
/*
 * Do the actual file system unmount.
 */
dounmount_interlock(struct mount *mp)
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
	mp->mnt_kern_flag |= MNTK_UNMOUNT;
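/*
 * dounmount() runs this interlock via mountlist_interlock(); if
 * MNTK_UNMOUNT is already set another unmount is in progress and the
 * interlock fails, otherwise the flag is set so only one unmount of a
 * given mount can proceed at a time.
 */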
dounmount(struct mount *mp, int flags)
	struct namecache *ncp;

	/*
	 * Exclusive access for unmounting purposes
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)

	/*
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
	error = lockmgr(&mp->mnt_lock, lflags);
	mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
	if (mp->mnt_kern_flag & MNTK_MWAIT)

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);
	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;

	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwards.
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);
		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			if ((flags & MNT_FORCE) == 0) {
				mount_warning(mp, "Cannot unmount: "
				mount_warning(mp, "Forced unmount: "

	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 */
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
				      "%d process references still "
				      "present", mp->mnt_refs);
			mount_warning(mp, "Forced unmount: "
				      "%d process references still "
				      "present", mp->mnt_refs);

	/*
	 * Decommission our special mnt_syncer vnode.  This also stops
	 * the vnlru code.  If we are unable to unmount we recommission
	 */
	if ((vp = mp->mnt_syncer) != NULL) {
		mp->mnt_syncer = NULL;
	if (((mp->mnt_flag & MNT_RDONLY) ||
	    (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
	    (flags & MNT_FORCE)) {
		error = VFS_UNMOUNT(mp, flags);
		if (mp->mnt_syncer == NULL)
			vfs_allocate_syncvnode(mp);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		lockmgr(&mp->mnt_lock, LK_RELEASE);
		if (mp->mnt_kern_flag & MNTK_MWAIT)

	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 */
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);

	if (mp->mnt_ncmountpt.ncp != NULL) {
		nch = mp->mnt_ncmountpt;
		cache_zero(&mp->mnt_ncmountpt);
		cache_clrmountpt(&nch);
	if (mp->mnt_ncmounton.ncp != NULL) {
		nch = mp->mnt_ncmounton;
		cache_zero(&mp->mnt_ncmounton);
		cache_clrmountpt(&nch);

	mp->mnt_vfc->vfc_refcount--;
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
		panic("unmount: dangling vnode");
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT)
mount_warning(struct mount *mp, const char *ctl, ...)
	if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf) == 0) {
		kprintf("unmount(%s): ", ptr);
	kprintf("unmount(%p", mp);
	if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name)
		kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name);
/*
 * Shim cache_fullpath() to handle the case where a process is chrooted into
 * a subdirectory of a mount.  In this case if the root mount matches the
 * process root directory's mount we have to specify the process's root
 * directory instead of the mount point, because the mount point might
 * be above the root directory.
 */
mount_path(struct proc *p, struct mount *mp, char **rb, char **fb)
	struct nchandle *nch;

	if (p && p->p_fd->fd_nrdir.mount == mp)
		nch = &p->p_fd->fd_nrdir;
	else
		nch = &mp->mnt_ncmountpt;
	return(cache_fullpath(p, nch, rb, fb));
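/*
 * For instance, a process chrooted to /jail on the filesystem mounted
 * at "/" has fd_nrdir.mount == mp for that filesystem, so the
 * f_mntonname reported to it by statfs() is resolved relative to /jail
 * rather than to the covering mount point it cannot see.
 */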
/*
 * Sync each mounted filesystem.
 */
static int syncprt = 0;
SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");

static int sync_callback(struct mount *mp, void *data);

sys_sync(struct sync_args *uap)
	mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);
	/*
	 * print out buffer pool stat information on each sync() call.
	 */

sync_callback(struct mount *mp, void *data __unused)
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT);
		mp->mnt_flag |= asyncflag;
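/*
 * Note that the per-filesystem flush above is asynchronous
 * (MNT_NOWAIT); the fully synchronous VFS_SYNC(mp, MNT_WAIT) pass is
 * performed by dounmount() before a filesystem is detached.
 */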
/* XXX PRISON: could be per prison flag */
static int prison_quotas;

SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
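/*
 * For example, "sysctl kern.prison.quotas=1" permits jailed processes
 * to call quotactl(); with the default of 0, sys_quotactl() below
 * rejects any process whose credential carries a prison.
 */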
/*
 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
 *
 * Change filesystem quotas.
 */
sys_quotactl(struct quotactl_args *uap)
	struct nlookupdata nd;

	if (p->p_ucred->cr_prison && !prison_quotas)
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	error = nlookup(&nd);
	mp = nd.nl_nch.mount;
	error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
			     uap->arg, nd.nl_cred);
884 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
885 * void *buf, int buflen)
887 * This function operates on a mount point and executes the specified
888 * operation using the specified control data, and possibly returns data.
890 * The actual number of bytes stored in the result buffer is returned, 0
891 * if none, otherwise an error is returned.
895 sys_mountctl(struct mountctl_args
*uap
)
897 struct thread
*td
= curthread
;
898 struct proc
*p
= td
->td_proc
;
906 * Sanity and permissions checks. We must be root.
909 if (p
->p_ucred
->cr_prison
!= NULL
)
911 if ((uap
->op
!= MOUNTCTL_MOUNTFLAGS
) &&
912 (error
= priv_check(td
, PRIV_ROOT
)) != 0)
916 * Argument length checks
918 if (uap
->ctllen
< 0 || uap
->ctllen
> 1024)
920 if (uap
->buflen
< 0 || uap
->buflen
> 16 * 1024)
922 if (uap
->path
== NULL
)
926 * Allocate the necessary buffers and copyin data
928 path
= objcache_get(namei_oc
, M_WAITOK
);
929 error
= copyinstr(uap
->path
, path
, MAXPATHLEN
, NULL
);
934 ctl
= kmalloc(uap
->ctllen
+ 1, M_TEMP
, M_WAITOK
|M_ZERO
);
935 error
= copyin(uap
->ctl
, ctl
, uap
->ctllen
);
940 buf
= kmalloc(uap
->buflen
+ 1, M_TEMP
, M_WAITOK
|M_ZERO
);
943 * Validate the descriptor
946 fp
= holdfp(p
->p_fd
, uap
->fd
, -1);
956 * Execute the internal kernel function and clean up.
958 error
= kern_mountctl(path
, uap
->op
, fp
, ctl
, uap
->ctllen
, buf
, uap
->buflen
, &uap
->sysmsg_result
);
961 if (error
== 0 && uap
->sysmsg_result
> 0)
962 error
= copyout(buf
, uap
->buf
, uap
->sysmsg_result
);
965 objcache_put(namei_oc
, path
);
974 * Execute a mount control operation by resolving the path to a mount point
975 * and calling vop_mountctl().
977 * Use the mount point from the nch instead of the vnode so nullfs mounts
978 * can properly spike the VOP.
981 kern_mountctl(const char *path
, int op
, struct file
*fp
,
982 const void *ctl
, int ctllen
,
983 void *buf
, int buflen
, int *res
)
987 struct nlookupdata nd
;
992 error
= nlookup_init(&nd
, path
, UIO_SYSSPACE
, NLC_FOLLOW
);
994 error
= nlookup(&nd
);
996 error
= cache_vget(&nd
.nl_nch
, nd
.nl_cred
, LK_EXCLUSIVE
, &vp
);
997 mp
= nd
.nl_nch
.mount
;
1004 * Must be the root of the filesystem
1006 if ((vp
->v_flag
& (VROOT
|VPFSROOT
)) == 0) {
1010 error
= vop_mountctl(mp
->mnt_vn_use_ops
, op
, fp
, ctl
, ctllen
,
1017 kern_statfs(struct nlookupdata
*nd
, struct statfs
*buf
)
1019 struct thread
*td
= curthread
;
1020 struct proc
*p
= td
->td_proc
;
1023 char *fullpath
, *freepath
;
1026 if ((error
= nlookup(nd
)) != 0)
1028 mp
= nd
->nl_nch
.mount
;
1030 if ((error
= VFS_STATFS(mp
, sp
, nd
->nl_cred
)) != 0)
1033 error
= mount_path(p
, mp
, &fullpath
, &freepath
);
1036 bzero(sp
->f_mntonname
, sizeof(sp
->f_mntonname
));
1037 strlcpy(sp
->f_mntonname
, fullpath
, sizeof(sp
->f_mntonname
));
1038 kfree(freepath
, M_TEMP
);
1040 sp
->f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
1041 bcopy(sp
, buf
, sizeof(*buf
));
1042 /* Only root should have access to the fsid's. */
1043 if (priv_check(td
, PRIV_ROOT
))
1044 buf
->f_fsid
.val
[0] = buf
->f_fsid
.val
[1] = 0;
1049 * statfs_args(char *path, struct statfs *buf)
1051 * Get filesystem statistics.
1054 sys_statfs(struct statfs_args
*uap
)
1056 struct nlookupdata nd
;
1060 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
1062 error
= kern_statfs(&nd
, &buf
);
1065 error
= copyout(&buf
, uap
->buf
, sizeof(*uap
->buf
));
1070 kern_fstatfs(int fd
, struct statfs
*buf
)
1072 struct thread
*td
= curthread
;
1073 struct proc
*p
= td
->td_proc
;
1077 char *fullpath
, *freepath
;
1081 if ((error
= holdvnode(p
->p_fd
, fd
, &fp
)) != 0)
1083 mp
= ((struct vnode
*)fp
->f_data
)->v_mount
;
1088 if (fp
->f_cred
== NULL
) {
1093 if ((error
= VFS_STATFS(mp
, sp
, fp
->f_cred
)) != 0)
1096 if ((error
= mount_path(p
, mp
, &fullpath
, &freepath
)) != 0)
1098 bzero(sp
->f_mntonname
, sizeof(sp
->f_mntonname
));
1099 strlcpy(sp
->f_mntonname
, fullpath
, sizeof(sp
->f_mntonname
));
1100 kfree(freepath
, M_TEMP
);
1102 sp
->f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
1103 bcopy(sp
, buf
, sizeof(*buf
));
1105 /* Only root should have access to the fsid's. */
1106 if (priv_check(td
, PRIV_ROOT
))
1107 buf
->f_fsid
.val
[0] = buf
->f_fsid
.val
[1] = 0;
1115 * fstatfs_args(int fd, struct statfs *buf)
1117 * Get filesystem statistics.
1120 sys_fstatfs(struct fstatfs_args
*uap
)
1125 error
= kern_fstatfs(uap
->fd
, &buf
);
1128 error
= copyout(&buf
, uap
->buf
, sizeof(*uap
->buf
));
1133 kern_statvfs(struct nlookupdata
*nd
, struct statvfs
*buf
)
1139 if ((error
= nlookup(nd
)) != 0)
1141 mp
= nd
->nl_nch
.mount
;
1142 sp
= &mp
->mnt_vstat
;
1143 if ((error
= VFS_STATVFS(mp
, sp
, nd
->nl_cred
)) != 0)
1147 if (mp
->mnt_flag
& MNT_RDONLY
)
1148 sp
->f_flag
|= ST_RDONLY
;
1149 if (mp
->mnt_flag
& MNT_NOSUID
)
1150 sp
->f_flag
|= ST_NOSUID
;
1151 bcopy(sp
, buf
, sizeof(*buf
));
1156 * statfs_args(char *path, struct statfs *buf)
1158 * Get filesystem statistics.
1161 sys_statvfs(struct statvfs_args
*uap
)
1163 struct nlookupdata nd
;
1167 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
1169 error
= kern_statvfs(&nd
, &buf
);
1172 error
= copyout(&buf
, uap
->buf
, sizeof(*uap
->buf
));
1177 kern_fstatvfs(int fd
, struct statvfs
*buf
)
1179 struct thread
*td
= curthread
;
1180 struct proc
*p
= td
->td_proc
;
1187 if ((error
= holdvnode(p
->p_fd
, fd
, &fp
)) != 0)
1189 mp
= ((struct vnode
*)fp
->f_data
)->v_mount
;
1194 if (fp
->f_cred
== NULL
) {
1198 sp
= &mp
->mnt_vstat
;
1199 if ((error
= VFS_STATVFS(mp
, sp
, fp
->f_cred
)) != 0)
1203 if (mp
->mnt_flag
& MNT_RDONLY
)
1204 sp
->f_flag
|= ST_RDONLY
;
1205 if (mp
->mnt_flag
& MNT_NOSUID
)
1206 sp
->f_flag
|= ST_NOSUID
;
1208 bcopy(sp
, buf
, sizeof(*buf
));
1216 * fstatfs_args(int fd, struct statfs *buf)
1218 * Get filesystem statistics.
1221 sys_fstatvfs(struct fstatvfs_args
*uap
)
1226 error
= kern_fstatvfs(uap
->fd
, &buf
);
1229 error
= copyout(&buf
, uap
->buf
, sizeof(*uap
->buf
));
1234 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
1236 * Get statistics on all filesystems.
1239 struct getfsstat_info
{
1240 struct statfs
*sfsp
;
1248 static int getfsstat_callback(struct mount
*, void *);
1252 sys_getfsstat(struct getfsstat_args
*uap
)
1254 struct thread
*td
= curthread
;
1255 struct proc
*p
= td
->td_proc
;
1256 struct getfsstat_info info
;
1258 bzero(&info
, sizeof(info
));
1260 info
.maxcount
= uap
->bufsize
/ sizeof(struct statfs
);
1261 info
.sfsp
= uap
->buf
;
1263 info
.flags
= uap
->flags
;
1266 mountlist_scan(getfsstat_callback
, &info
, MNTSCAN_FORWARD
);
1267 if (info
.sfsp
&& info
.count
> info
.maxcount
)
1268 uap
->sysmsg_result
= info
.maxcount
;
1270 uap
->sysmsg_result
= info
.count
;
1271 return (info
.error
);
1275 getfsstat_callback(struct mount
*mp
, void *data
)
1277 struct getfsstat_info
*info
= data
;
1283 if (info
->sfsp
&& info
->count
< info
->maxcount
) {
1284 if (info
->p
&& !chroot_visible_mnt(mp
, info
->p
))
1289 * If MNT_NOWAIT or MNT_LAZY is specified, do not
1290 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
1291 * overrides MNT_WAIT.
1293 if (((info
->flags
& (MNT_LAZY
|MNT_NOWAIT
)) == 0 ||
1294 (info
->flags
& MNT_WAIT
)) &&
1295 (error
= VFS_STATFS(mp
, sp
, info
->p
->p_ucred
))) {
1298 sp
->f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
1300 error
= mount_path(info
->p
, mp
, &fullpath
, &freepath
);
1302 info
->error
= error
;
1305 bzero(sp
->f_mntonname
, sizeof(sp
->f_mntonname
));
1306 strlcpy(sp
->f_mntonname
, fullpath
, sizeof(sp
->f_mntonname
));
1307 kfree(freepath
, M_TEMP
);
1309 error
= copyout(sp
, info
->sfsp
, sizeof(*sp
));
1311 info
->error
= error
;
1321 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf,
1322 long bufsize, int flags)
1324 * Get statistics on all filesystems.
1327 struct getvfsstat_info
{
1328 struct statfs
*sfsp
;
1329 struct statvfs
*vsfsp
;
1337 static int getvfsstat_callback(struct mount
*, void *);
1341 sys_getvfsstat(struct getvfsstat_args
*uap
)
1343 struct thread
*td
= curthread
;
1344 struct proc
*p
= td
->td_proc
;
1345 struct getvfsstat_info info
;
1347 bzero(&info
, sizeof(info
));
1349 info
.maxcount
= uap
->vbufsize
/ sizeof(struct statvfs
);
1350 info
.sfsp
= uap
->buf
;
1351 info
.vsfsp
= uap
->vbuf
;
1353 info
.flags
= uap
->flags
;
1356 mountlist_scan(getvfsstat_callback
, &info
, MNTSCAN_FORWARD
);
1357 if (info
.vsfsp
&& info
.count
> info
.maxcount
)
1358 uap
->sysmsg_result
= info
.maxcount
;
1360 uap
->sysmsg_result
= info
.count
;
1361 return (info
.error
);
1365 getvfsstat_callback(struct mount
*mp
, void *data
)
1367 struct getvfsstat_info
*info
= data
;
1369 struct statvfs
*vsp
;
1374 if (info
->vsfsp
&& info
->count
< info
->maxcount
) {
1375 if (info
->p
&& !chroot_visible_mnt(mp
, info
->p
))
1378 vsp
= &mp
->mnt_vstat
;
1381 * If MNT_NOWAIT or MNT_LAZY is specified, do not
1382 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
1383 * overrides MNT_WAIT.
1385 if (((info
->flags
& (MNT_LAZY
|MNT_NOWAIT
)) == 0 ||
1386 (info
->flags
& MNT_WAIT
)) &&
1387 (error
= VFS_STATFS(mp
, sp
, info
->p
->p_ucred
))) {
1390 sp
->f_flags
= mp
->mnt_flag
& MNT_VISFLAGMASK
;
1392 if (((info
->flags
& (MNT_LAZY
|MNT_NOWAIT
)) == 0 ||
1393 (info
->flags
& MNT_WAIT
)) &&
1394 (error
= VFS_STATVFS(mp
, vsp
, info
->p
->p_ucred
))) {
1398 if (mp
->mnt_flag
& MNT_RDONLY
)
1399 vsp
->f_flag
|= ST_RDONLY
;
1400 if (mp
->mnt_flag
& MNT_NOSUID
)
1401 vsp
->f_flag
|= ST_NOSUID
;
1403 error
= mount_path(info
->p
, mp
, &fullpath
, &freepath
);
1405 info
->error
= error
;
1408 bzero(sp
->f_mntonname
, sizeof(sp
->f_mntonname
));
1409 strlcpy(sp
->f_mntonname
, fullpath
, sizeof(sp
->f_mntonname
));
1410 kfree(freepath
, M_TEMP
);
1412 error
= copyout(sp
, info
->sfsp
, sizeof(*sp
));
1414 error
= copyout(vsp
, info
->vsfsp
, sizeof(*vsp
));
1416 info
->error
= error
;
1428 * fchdir_args(int fd)
1430 * Change current working directory to a given file descriptor.
1434 sys_fchdir(struct fchdir_args
*uap
)
1436 struct thread
*td
= curthread
;
1437 struct proc
*p
= td
->td_proc
;
1438 struct filedesc
*fdp
= p
->p_fd
;
1439 struct vnode
*vp
, *ovp
;
1442 struct nchandle nch
, onch
, tnch
;
1445 if ((error
= holdvnode(fdp
, uap
->fd
, &fp
)) != 0)
1447 vp
= (struct vnode
*)fp
->f_data
;
1449 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
);
1450 if (vp
->v_type
!= VDIR
|| fp
->f_nchandle
.ncp
== NULL
)
1453 error
= VOP_ACCESS(vp
, VEXEC
, p
->p_ucred
);
1459 cache_copy(&fp
->f_nchandle
, &nch
);
1462 * If the ncp has become a mount point, traverse through
1466 while (!error
&& (nch
.ncp
->nc_flag
& NCF_ISMOUNTPT
) &&
1467 (mp
= cache_findmount(&nch
)) != NULL
1469 error
= nlookup_mp(mp
, &tnch
);
1471 cache_unlock(&tnch
); /* leave ref intact */
1473 vp
= tnch
.ncp
->nc_vp
;
1474 error
= vget(vp
, LK_SHARED
);
1475 KKASSERT(error
== 0);
1482 onch
= fdp
->fd_ncdir
;
1483 vn_unlock(vp
); /* leave ref intact */
1485 fdp
->fd_ncdir
= nch
;
1497 kern_chdir(struct nlookupdata
*nd
)
1499 struct thread
*td
= curthread
;
1500 struct proc
*p
= td
->td_proc
;
1501 struct filedesc
*fdp
= p
->p_fd
;
1502 struct vnode
*vp
, *ovp
;
1503 struct nchandle onch
;
1506 if ((error
= nlookup(nd
)) != 0)
1508 if ((vp
= nd
->nl_nch
.ncp
->nc_vp
) == NULL
)
1510 if ((error
= vget(vp
, LK_SHARED
)) != 0)
1513 error
= checkvp_chdir(vp
, td
);
1517 onch
= fdp
->fd_ncdir
;
1518 cache_unlock(&nd
->nl_nch
); /* leave reference intact */
1519 fdp
->fd_ncdir
= nd
->nl_nch
;
1523 cache_zero(&nd
->nl_nch
);
1531 * chdir_args(char *path)
1533 * Change current working directory (``.'').
1536 sys_chdir(struct chdir_args
*uap
)
1538 struct nlookupdata nd
;
1541 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
1543 error
= kern_chdir(&nd
);
1549 * Helper function for raised chroot(2) security function: Refuse if
1550 * any filedescriptors are open directories.
1553 chroot_refuse_vdir_fds(struct filedesc
*fdp
)
1560 for (fd
= 0; fd
< fdp
->fd_nfiles
; fd
++) {
1561 if ((error
= holdvnode(fdp
, fd
, &fp
)) != 0)
1563 vp
= (struct vnode
*)fp
->f_data
;
1564 if (vp
->v_type
!= VDIR
) {
/*
 * This sysctl determines if we will allow a process to chroot(2) if it
 * has a directory open:
 *	0: disallowed for all processes.
 *	1: allowed for processes that were not already chroot(2)'ed.
 *	2: allowed for all processes.
 */
static int chroot_allow_open_directories = 1;

SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
     &chroot_allow_open_directories, 0, "");
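/*
 * For example, "sysctl kern.chroot_allow_open_directories=0" makes
 * kern_chroot() run chroot_refuse_vdir_fds() for every caller, so a
 * process holding an open directory descriptor cannot chroot(2) and
 * later fchdir() its way back out of the new root.
 */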
1588 * chroot to the specified namecache entry. We obtain the vp from the
1589 * namecache data. The passed ncp must be locked and referenced and will
1590 * remain locked and referenced on return.
1593 kern_chroot(struct nchandle
*nch
)
1595 struct thread
*td
= curthread
;
1596 struct proc
*p
= td
->td_proc
;
1597 struct filedesc
*fdp
= p
->p_fd
;
1602 * Only privileged user can chroot
1604 error
= priv_check_cred(p
->p_ucred
, PRIV_VFS_CHROOT
, 0);
1609 * Disallow open directory descriptors (fchdir() breakouts).
1611 if (chroot_allow_open_directories
== 0 ||
1612 (chroot_allow_open_directories
== 1 && fdp
->fd_rdir
!= rootvnode
)) {
1613 if ((error
= chroot_refuse_vdir_fds(fdp
)) != 0)
1616 if ((vp
= nch
->ncp
->nc_vp
) == NULL
)
1619 if ((error
= vget(vp
, LK_SHARED
)) != 0)
1623 * Check the validity of vp as a directory to change to and
1624 * associate it with rdir/jdir.
1626 error
= checkvp_chdir(vp
, td
);
1627 vn_unlock(vp
); /* leave reference intact */
1629 vrele(fdp
->fd_rdir
);
1630 fdp
->fd_rdir
= vp
; /* reference inherited by fd_rdir */
1631 cache_drop(&fdp
->fd_nrdir
);
1632 cache_copy(nch
, &fdp
->fd_nrdir
);
1633 if (fdp
->fd_jdir
== NULL
) {
1636 cache_copy(nch
, &fdp
->fd_njdir
);
1645 * chroot_args(char *path)
1647 * Change notion of root (``/'') directory.
1651 sys_chroot(struct chroot_args
*uap
)
1653 struct thread
*td
= curthread
;
1654 struct nlookupdata nd
;
1657 KKASSERT(td
->td_proc
);
1658 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
1663 nd
.nl_flags
|= NLC_EXEC
;
1664 error
= nlookup(&nd
);
1666 error
= kern_chroot(&nd
.nl_nch
);
1672 * Common routine for chroot and chdir. Given a locked, referenced vnode,
1673 * determine whether it is legal to chdir to the vnode. The vnode's state
1674 * is not changed by this call.
1677 checkvp_chdir(struct vnode
*vp
, struct thread
*td
)
1681 if (vp
->v_type
!= VDIR
)
1684 error
= VOP_ACCESS(vp
, VEXEC
, td
->td_proc
->p_ucred
);
1689 kern_open(struct nlookupdata
*nd
, int oflags
, int mode
, int *res
)
1691 struct thread
*td
= curthread
;
1692 struct proc
*p
= td
->td_proc
;
1693 struct lwp
*lp
= td
->td_lwp
;
1694 struct filedesc
*fdp
= p
->p_fd
;
1699 int type
, indx
, error
;
1702 if ((oflags
& O_ACCMODE
) == O_ACCMODE
)
1704 flags
= FFLAGS(oflags
);
1705 error
= falloc(p
, &nfp
, NULL
);
1709 cmode
= ((mode
&~ fdp
->fd_cmask
) & ALLPERMS
) & ~S_ISTXT
;
1712 * XXX p_dupfd is a real mess. It allows a device to return a
1713 * file descriptor to be duplicated rather then doing the open
1719 * Call vn_open() to do the lookup and assign the vnode to the
1720 * file pointer. vn_open() does not change the ref count on fp
1721 * and the vnode, on success, will be inherited by the file pointer
1724 nd
->nl_flags
|= NLC_LOCKVP
;
1725 error
= vn_open(nd
, fp
, flags
, cmode
);
1729 * handle special fdopen() case. bleh. dupfdopen() is
1730 * responsible for dropping the old contents of ofiles[indx]
1733 * Note that fsetfd() will add a ref to fp which represents
1734 * the fd_files[] assignment. We must still drop our
1737 if ((error
== ENODEV
|| error
== ENXIO
) && lp
->lwp_dupfd
>= 0) {
1738 if (fdalloc(p
, 0, &indx
) == 0) {
1739 error
= dupfdopen(p
, indx
, lp
->lwp_dupfd
, flags
, error
);
1742 fdrop(fp
); /* our ref */
1745 fsetfd(p
, NULL
, indx
);
1748 fdrop(fp
); /* our ref */
1749 if (error
== ERESTART
)
1755 * ref the vnode for ourselves so it can't be ripped out from under
1756 * is. XXX need an ND flag to request that the vnode be returned
1759 * Reserve a file descriptor but do not assign it until the open
1762 vp
= (struct vnode
*)fp
->f_data
;
1764 if ((error
= fdalloc(p
, 0, &indx
)) != 0) {
1771 * If no error occurs the vp will have been assigned to the file
1776 if (flags
& (O_EXLOCK
| O_SHLOCK
)) {
1777 lf
.l_whence
= SEEK_SET
;
1780 if (flags
& O_EXLOCK
)
1781 lf
.l_type
= F_WRLCK
;
1783 lf
.l_type
= F_RDLCK
;
1784 if (flags
& FNONBLOCK
)
1789 if ((error
= VOP_ADVLOCK(vp
, (caddr_t
)fp
, F_SETLK
, &lf
, type
)) != 0) {
1791 * lock request failed. Clean up the reserved
1795 fsetfd(p
, NULL
, indx
);
1799 fp
->f_flag
|= FHASLOCK
;
1803 * Assert that all regular file vnodes were created with a object.
1805 KASSERT(vp
->v_type
!= VREG
|| vp
->v_object
!= NULL
,
1806 ("open: regular file has no backing object after vn_open"));
1812 * release our private reference, leaving the one associated with the
1813 * descriptor table intact.
1815 fsetfd(p
, fp
, indx
);
1822 * open_args(char *path, int flags, int mode)
1824 * Check permissions, allocate an open file structure,
1825 * and call the device open routine if any.
1828 sys_open(struct open_args
*uap
)
1830 struct nlookupdata nd
;
1833 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
1835 error
= kern_open(&nd
, uap
->flags
,
1836 uap
->mode
, &uap
->sysmsg_result
);
1843 * openat_args(int fd, char *path, int flags, int mode)
1846 sys_openat(struct openat_args
*uap
)
1848 struct nlookupdata nd
;
1852 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
, UIO_USERSPACE
, 0);
1854 error
= kern_open(&nd
, uap
->flags
, uap
->mode
,
1855 &uap
->sysmsg_result
);
1857 nlookup_done_at(&nd
, fp
);
1862 kern_mknod(struct nlookupdata
*nd
, int mode
, int rmajor
, int rminor
)
1864 struct thread
*td
= curthread
;
1865 struct proc
*p
= td
->td_proc
;
1874 vattr
.va_mode
= (mode
& ALLPERMS
) &~ p
->p_fd
->fd_cmask
;
1875 vattr
.va_rmajor
= rmajor
;
1876 vattr
.va_rminor
= rminor
;
1878 switch (mode
& S_IFMT
) {
1879 case S_IFMT
: /* used by badsect to flag bad sectors */
1880 error
= priv_check_cred(p
->p_ucred
, PRIV_VFS_MKNOD_BAD
, 0);
1881 vattr
.va_type
= VBAD
;
1884 error
= priv_check(td
, PRIV_VFS_MKNOD_DEV
);
1885 vattr
.va_type
= VCHR
;
1888 error
= priv_check(td
, PRIV_VFS_MKNOD_DEV
);
1889 vattr
.va_type
= VBLK
;
1892 error
= priv_check_cred(p
->p_ucred
, PRIV_VFS_MKNOD_WHT
, 0);
1895 case S_IFDIR
: /* special directories support for HAMMER */
1896 error
= priv_check_cred(p
->p_ucred
, PRIV_VFS_MKNOD_DIR
, 0);
1897 vattr
.va_type
= VDIR
;
1908 nd
->nl_flags
|= NLC_CREATE
| NLC_REFDVP
;
1909 if ((error
= nlookup(nd
)) != 0)
1911 if (nd
->nl_nch
.ncp
->nc_vp
)
1913 if ((error
= ncp_writechk(&nd
->nl_nch
)) != 0)
1917 error
= VOP_NWHITEOUT(&nd
->nl_nch
, nd
->nl_dvp
,
1918 nd
->nl_cred
, NAMEI_CREATE
);
1921 error
= VOP_NMKNOD(&nd
->nl_nch
, nd
->nl_dvp
,
1922 &vp
, nd
->nl_cred
, &vattr
);
1930 * mknod_args(char *path, int mode, int dev)
1932 * Create a special file.
1935 sys_mknod(struct mknod_args
*uap
)
1937 struct nlookupdata nd
;
1940 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
1942 error
= kern_mknod(&nd
, uap
->mode
,
1943 umajor(uap
->dev
), uminor(uap
->dev
));
1950 kern_mkfifo(struct nlookupdata
*nd
, int mode
)
1952 struct thread
*td
= curthread
;
1953 struct proc
*p
= td
->td_proc
;
1960 nd
->nl_flags
|= NLC_CREATE
| NLC_REFDVP
;
1961 if ((error
= nlookup(nd
)) != 0)
1963 if (nd
->nl_nch
.ncp
->nc_vp
)
1965 if ((error
= ncp_writechk(&nd
->nl_nch
)) != 0)
1969 vattr
.va_type
= VFIFO
;
1970 vattr
.va_mode
= (mode
& ALLPERMS
) &~ p
->p_fd
->fd_cmask
;
1972 error
= VOP_NMKNOD(&nd
->nl_nch
, nd
->nl_dvp
, &vp
, nd
->nl_cred
, &vattr
);
1979 * mkfifo_args(char *path, int mode)
1981 * Create a named pipe.
1984 sys_mkfifo(struct mkfifo_args
*uap
)
1986 struct nlookupdata nd
;
1989 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
1991 error
= kern_mkfifo(&nd
, uap
->mode
);
static int hardlink_check_uid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
    &hardlink_check_uid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "users");

static int hardlink_check_gid = 0;
SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
    &hardlink_check_gid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "groups");
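/*
 * For example, with security.hardlink_check_uid set to 1 an
 * unprivileged process may only hard-link files it owns;
 * can_hardlink() below enforces this by comparing the caller's uid
 * (and, for the gid knob, group membership) against the file's
 * attributes.
 */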
2008 can_hardlink(struct vnode
*vp
, struct thread
*td
, struct ucred
*cred
)
2014 * Shortcut if disabled
2016 if (hardlink_check_uid
== 0 && hardlink_check_gid
== 0)
2020 * Privileged user can always hardlink
2022 if (priv_check_cred(cred
, PRIV_VFS_LINK
, 0) == 0)
2026 * Otherwise only if the originating file is owned by the
2027 * same user or group. Note that any group is allowed if
2028 * the file is owned by the caller.
2030 error
= VOP_GETATTR(vp
, &va
);
2034 if (hardlink_check_uid
) {
2035 if (cred
->cr_uid
!= va
.va_uid
)
2039 if (hardlink_check_gid
) {
2040 if (cred
->cr_uid
!= va
.va_uid
&& !groupmember(va
.va_gid
, cred
))
2048 kern_link(struct nlookupdata
*nd
, struct nlookupdata
*linknd
)
2050 struct thread
*td
= curthread
;
2055 * Lookup the source and obtained a locked vnode.
2057 * You may only hardlink a file which you have write permission
2058 * on or which you own.
2060 * XXX relookup on vget failure / race ?
2063 nd
->nl_flags
|= NLC_WRITE
| NLC_OWN
| NLC_HLINK
;
2064 if ((error
= nlookup(nd
)) != 0)
2066 vp
= nd
->nl_nch
.ncp
->nc_vp
;
2067 KKASSERT(vp
!= NULL
);
2068 if (vp
->v_type
== VDIR
)
2069 return (EPERM
); /* POSIX */
2070 if ((error
= ncp_writechk(&nd
->nl_nch
)) != 0)
2072 if ((error
= vget(vp
, LK_EXCLUSIVE
)) != 0)
2076 * Unlock the source so we can lookup the target without deadlocking
2077 * (XXX vp is locked already, possible other deadlock?). The target
2080 KKASSERT(nd
->nl_flags
& NLC_NCPISLOCKED
);
2081 nd
->nl_flags
&= ~NLC_NCPISLOCKED
;
2082 cache_unlock(&nd
->nl_nch
);
2084 linknd
->nl_flags
|= NLC_CREATE
| NLC_REFDVP
;
2085 if ((error
= nlookup(linknd
)) != 0) {
2089 if (linknd
->nl_nch
.ncp
->nc_vp
) {
2095 * Finally run the new API VOP.
2097 error
= can_hardlink(vp
, td
, td
->td_proc
->p_ucred
);
2099 error
= VOP_NLINK(&linknd
->nl_nch
, linknd
->nl_dvp
,
2100 vp
, linknd
->nl_cred
);
2107 * link_args(char *path, char *link)
2109 * Make a hard file link.
2112 sys_link(struct link_args
*uap
)
2114 struct nlookupdata nd
, linknd
;
2117 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2119 error
= nlookup_init(&linknd
, uap
->link
, UIO_USERSPACE
, 0);
2121 error
= kern_link(&nd
, &linknd
);
2122 nlookup_done(&linknd
);
2129 kern_symlink(struct nlookupdata
*nd
, char *path
, int mode
)
2137 nd
->nl_flags
|= NLC_CREATE
| NLC_REFDVP
;
2138 if ((error
= nlookup(nd
)) != 0)
2140 if (nd
->nl_nch
.ncp
->nc_vp
)
2142 if ((error
= ncp_writechk(&nd
->nl_nch
)) != 0)
2146 vattr
.va_mode
= mode
;
2147 error
= VOP_NSYMLINK(&nd
->nl_nch
, dvp
, &vp
, nd
->nl_cred
, &vattr
, path
);
2154 * symlink(char *path, char *link)
2156 * Make a symbolic link.
2159 sys_symlink(struct symlink_args
*uap
)
2161 struct thread
*td
= curthread
;
2162 struct nlookupdata nd
;
2167 path
= objcache_get(namei_oc
, M_WAITOK
);
2168 error
= copyinstr(uap
->path
, path
, MAXPATHLEN
, NULL
);
2170 error
= nlookup_init(&nd
, uap
->link
, UIO_USERSPACE
, 0);
2172 mode
= ACCESSPERMS
& ~td
->td_proc
->p_fd
->fd_cmask
;
2173 error
= kern_symlink(&nd
, path
, mode
);
2177 objcache_put(namei_oc
, path
);
2182 * undelete_args(char *path)
2184 * Delete a whiteout from the filesystem.
2188 sys_undelete(struct undelete_args
*uap
)
2190 struct nlookupdata nd
;
2193 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2195 nd
.nl_flags
|= NLC_DELETE
| NLC_REFDVP
;
2197 error
= nlookup(&nd
);
2199 error
= ncp_writechk(&nd
.nl_nch
);
2201 error
= VOP_NWHITEOUT(&nd
.nl_nch
, nd
.nl_dvp
, nd
.nl_cred
,
2209 kern_unlink(struct nlookupdata
*nd
)
2214 nd
->nl_flags
|= NLC_DELETE
| NLC_REFDVP
;
2215 if ((error
= nlookup(nd
)) != 0)
2217 if ((error
= ncp_writechk(&nd
->nl_nch
)) != 0)
2219 error
= VOP_NREMOVE(&nd
->nl_nch
, nd
->nl_dvp
, nd
->nl_cred
);
2224 * unlink_args(char *path)
2226 * Delete a name from the filesystem.
2229 sys_unlink(struct unlink_args
*uap
)
2231 struct nlookupdata nd
;
2234 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2236 error
= kern_unlink(&nd
);
2243 * unlinkat_args(int fd, char *path, int flags)
2245 * Delete the file or directory entry pointed to by fd/path.
2248 sys_unlinkat(struct unlinkat_args
*uap
)
2250 struct nlookupdata nd
;
2254 if (uap
->flags
& ~AT_REMOVEDIR
)
2257 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
, UIO_USERSPACE
, 0);
2259 if (uap
->flags
& AT_REMOVEDIR
)
2260 error
= kern_rmdir(&nd
);
2262 error
= kern_unlink(&nd
);
2264 nlookup_done_at(&nd
, fp
);
2269 kern_lseek(int fd
, off_t offset
, int whence
, off_t
*res
)
2271 struct thread
*td
= curthread
;
2272 struct proc
*p
= td
->td_proc
;
2279 fp
= holdfp(p
->p_fd
, fd
, -1);
2282 if (fp
->f_type
!= DTYPE_VNODE
) {
2286 vp
= (struct vnode
*)fp
->f_data
;
2290 new_offset
= fp
->f_offset
+ offset
;
2294 error
= VOP_GETATTR(vp
, &vattr
);
2295 new_offset
= offset
+ vattr
.va_size
;
2298 new_offset
= offset
;
2308 * Validate the seek position. Negative offsets are not allowed
2309 * for regular files or directories.
2311 * Normally we would also not want to allow negative offsets for
2312 * character and block-special devices. However kvm addresses
2313 * on 64 bit architectures might appear to be negative and must
2317 if (new_offset
< 0 &&
2318 (vp
->v_type
== VREG
|| vp
->v_type
== VDIR
)) {
2321 fp
->f_offset
= new_offset
;
2324 *res
= fp
->f_offset
;
2331 * lseek_args(int fd, int pad, off_t offset, int whence)
2333 * Reposition read/write file offset.
2336 sys_lseek(struct lseek_args
*uap
)
2340 error
= kern_lseek(uap
->fd
, uap
->offset
, uap
->whence
,
2341 &uap
->sysmsg_offset
);
2347 * Check if current process can access given file. amode is a bitmask of *_OK
2348 * access bits. flags is a bitmask of AT_* flags.
2351 kern_access(struct nlookupdata
*nd
, int amode
, int flags
)
2356 if (flags
& ~AT_EACCESS
)
2358 if ((error
= nlookup(nd
)) != 0)
2361 error
= cache_vget(&nd
->nl_nch
, nd
->nl_cred
, LK_EXCLUSIVE
, &vp
);
2365 /* Flags == 0 means only check for existence. */
2374 if ((mode
& VWRITE
) == 0 ||
2375 (error
= vn_writechk(vp
, &nd
->nl_nch
)) == 0)
2376 error
= VOP_ACCESS_FLAGS(vp
, mode
, flags
, nd
->nl_cred
);
2379 * If the file handle is stale we have to re-resolve the
2380 * entry. This is a hack at the moment.
2382 if (error
== ESTALE
) {
2384 cache_setunresolved(&nd
->nl_nch
);
2385 error
= cache_resolve(&nd
->nl_nch
, nd
->nl_cred
);
2398 * access_args(char *path, int flags)
2400 * Check access permissions.
2403 sys_access(struct access_args
*uap
)
2405 struct nlookupdata nd
;
2408 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2410 error
= kern_access(&nd
, uap
->flags
, 0);
2417 * faccessat_args(int fd, char *path, int amode, int flags)
2419 * Check access permissions.
2422 sys_faccessat(struct faccessat_args
*uap
)
2424 struct nlookupdata nd
;
2428 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
, UIO_USERSPACE
,
2431 error
= kern_access(&nd
, uap
->amode
, uap
->flags
);
2432 nlookup_done_at(&nd
, fp
);
2438 kern_stat(struct nlookupdata
*nd
, struct stat
*st
)
2444 if ((error
= nlookup(nd
)) != 0)
2447 if ((vp
= nd
->nl_nch
.ncp
->nc_vp
) == NULL
)
2451 if ((error
= vget(vp
, LK_SHARED
)) != 0)
2453 error
= vn_stat(vp
, st
, nd
->nl_cred
);
2456 * If the file handle is stale we have to re-resolve the entry. This
2457 * is a hack at the moment.
2459 if (error
== ESTALE
) {
2461 cache_setunresolved(&nd
->nl_nch
);
2462 error
= cache_resolve(&nd
->nl_nch
, nd
->nl_cred
);
2472 * stat_args(char *path, struct stat *ub)
2474 * Get file status; this version follows links.
2477 sys_stat(struct stat_args
*uap
)
2479 struct nlookupdata nd
;
2483 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2485 error
= kern_stat(&nd
, &st
);
2487 error
= copyout(&st
, uap
->ub
, sizeof(*uap
->ub
));
2494 * lstat_args(char *path, struct stat *ub)
2496 * Get file status; this version does not follow links.
2499 sys_lstat(struct lstat_args
*uap
)
2501 struct nlookupdata nd
;
2505 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2507 error
= kern_stat(&nd
, &st
);
2509 error
= copyout(&st
, uap
->ub
, sizeof(*uap
->ub
));
2516 * fstatat_args(int fd, char *path, struct stat *sb, int flags)
2518 * Get status of file pointed to by fd/path.
2521 sys_fstatat(struct fstatat_args
*uap
)
2523 struct nlookupdata nd
;
2529 if (uap
->flags
& ~AT_SYMLINK_NOFOLLOW
)
2532 flags
= (uap
->flags
& AT_SYMLINK_NOFOLLOW
) ? 0 : NLC_FOLLOW
;
2534 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
,
2535 UIO_USERSPACE
, flags
);
2537 error
= kern_stat(&nd
, &st
);
2539 error
= copyout(&st
, uap
->sb
, sizeof(*uap
->sb
));
2541 nlookup_done_at(&nd
, fp
);
2546 * pathconf_Args(char *path, int name)
2548 * Get configurable pathname variables.
2552 sys_pathconf(struct pathconf_args
*uap
)
2554 struct nlookupdata nd
;
2559 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2561 error
= nlookup(&nd
);
2563 error
= cache_vget(&nd
.nl_nch
, nd
.nl_cred
, LK_EXCLUSIVE
, &vp
);
2566 error
= VOP_PATHCONF(vp
, uap
->name
, &uap
->sysmsg_reg
);
2574 * kern_readlink isn't properly split yet. There is a copyin burried
2575 * in VOP_READLINK().
2578 kern_readlink(struct nlookupdata
*nd
, char *buf
, int count
, int *res
)
2580 struct thread
*td
= curthread
;
2581 struct proc
*p
= td
->td_proc
;
2587 if ((error
= nlookup(nd
)) != 0)
2589 error
= cache_vget(&nd
->nl_nch
, nd
->nl_cred
, LK_EXCLUSIVE
, &vp
);
2592 if (vp
->v_type
!= VLNK
) {
2595 aiov
.iov_base
= buf
;
2596 aiov
.iov_len
= count
;
2597 auio
.uio_iov
= &aiov
;
2598 auio
.uio_iovcnt
= 1;
2599 auio
.uio_offset
= 0;
2600 auio
.uio_rw
= UIO_READ
;
2601 auio
.uio_segflg
= UIO_USERSPACE
;
2603 auio
.uio_resid
= count
;
2604 error
= VOP_READLINK(vp
, &auio
, p
->p_ucred
);
2607 *res
= count
- auio
.uio_resid
;
2612 * readlink_args(char *path, char *buf, int count)
2614 * Return target name of a symbolic link.
2617 sys_readlink(struct readlink_args
*uap
)
2619 struct nlookupdata nd
;
2622 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2624 error
= kern_readlink(&nd
, uap
->buf
, uap
->count
,
2625 &uap
->sysmsg_result
);
2632 setfflags(struct vnode
*vp
, int flags
)
2634 struct thread
*td
= curthread
;
2635 struct proc
*p
= td
->td_proc
;
2640 * Prevent non-root users from setting flags on devices. When
2641 * a device is reused, users can retain ownership of the device
2642 * if they are allowed to set flags and programs assume that
2643 * chown can't fail when done as root.
2645 if ((vp
->v_type
== VCHR
|| vp
->v_type
== VBLK
) &&
2646 ((error
= priv_check_cred(p
->p_ucred
, PRIV_VFS_CHFLAGS_DEV
, 0)) != 0))
2650 * note: vget is required for any operation that might mod the vnode
2651 * so VINACTIVE is properly cleared.
2653 if ((error
= vget(vp
, LK_EXCLUSIVE
)) == 0) {
2655 vattr
.va_flags
= flags
;
2656 error
= VOP_SETATTR(vp
, &vattr
, p
->p_ucred
);
2663 * chflags(char *path, int flags)
2665 * Change flags of a file given a path name.
2669 sys_chflags(struct chflags_args
*uap
)
2671 struct nlookupdata nd
;
2676 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2678 error
= nlookup(&nd
);
2680 error
= ncp_writechk(&nd
.nl_nch
);
2682 error
= cache_vref(&nd
.nl_nch
, nd
.nl_cred
, &vp
);
2685 error
= setfflags(vp
, uap
->flags
);
2692 * lchflags(char *path, int flags)
2694 * Change flags of a file given a path name, but don't follow symlinks.
2698 sys_lchflags(struct lchflags_args
*uap
)
2700 struct nlookupdata nd
;
2705 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2707 error
= nlookup(&nd
);
2709 error
= ncp_writechk(&nd
.nl_nch
);
2711 error
= cache_vref(&nd
.nl_nch
, nd
.nl_cred
, &vp
);
2714 error
= setfflags(vp
, uap
->flags
);
2721 * fchflags_args(int fd, int flags)
2723 * Change flags of a file given a file descriptor.
2727 sys_fchflags(struct fchflags_args
*uap
)
2729 struct thread
*td
= curthread
;
2730 struct proc
*p
= td
->td_proc
;
2734 if ((error
= holdvnode(p
->p_fd
, uap
->fd
, &fp
)) != 0)
2736 if (fp
->f_nchandle
.ncp
)
2737 error
= ncp_writechk(&fp
->f_nchandle
);
2739 error
= setfflags((struct vnode
*) fp
->f_data
, uap
->flags
);
2745 setfmode(struct vnode
*vp
, int mode
)
2747 struct thread
*td
= curthread
;
2748 struct proc
*p
= td
->td_proc
;
2753 * note: vget is required for any operation that might mod the vnode
2754 * so VINACTIVE is properly cleared.
2756 if ((error
= vget(vp
, LK_EXCLUSIVE
)) == 0) {
2758 vattr
.va_mode
= mode
& ALLPERMS
;
2759 error
= VOP_SETATTR(vp
, &vattr
, p
->p_ucred
);
2766 kern_chmod(struct nlookupdata
*nd
, int mode
)
2771 if ((error
= nlookup(nd
)) != 0)
2773 if ((error
= cache_vref(&nd
->nl_nch
, nd
->nl_cred
, &vp
)) != 0)
2775 if ((error
= ncp_writechk(&nd
->nl_nch
)) == 0)
2776 error
= setfmode(vp
, mode
);
2782 * chmod_args(char *path, int mode)
2784 * Change mode of a file given path name.
2788 sys_chmod(struct chmod_args
*uap
)
2790 struct nlookupdata nd
;
2793 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2795 error
= kern_chmod(&nd
, uap
->mode
);
2801 * lchmod_args(char *path, int mode)
2803 * Change mode of a file given path name (don't follow links.)
2807 sys_lchmod(struct lchmod_args
*uap
)
2809 struct nlookupdata nd
;
2812 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2814 error
= kern_chmod(&nd
, uap
->mode
);
2820 * fchmod_args(int fd, int mode)
2822 * Change mode of a file given a file descriptor.
2826 sys_fchmod(struct fchmod_args
*uap
)
2828 struct thread
*td
= curthread
;
2829 struct proc
*p
= td
->td_proc
;
2833 if ((error
= holdvnode(p
->p_fd
, uap
->fd
, &fp
)) != 0)
2835 if (fp
->f_nchandle
.ncp
)
2836 error
= ncp_writechk(&fp
->f_nchandle
);
2838 error
= setfmode((struct vnode
*)fp
->f_data
, uap
->mode
);
2844 * fchmodat_args(char *path, int mode)
2846 * Change mode of a file pointed to by fd/path.
2849 sys_fchmodat(struct fchmodat_args
*uap
)
2851 struct nlookupdata nd
;
2856 if (uap
->flags
& ~AT_SYMLINK_NOFOLLOW
)
2858 flags
= (uap
->flags
& AT_SYMLINK_NOFOLLOW
) ? 0 : NLC_FOLLOW
;
2860 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
,
2861 UIO_USERSPACE
, flags
);
2863 error
= kern_chmod(&nd
, uap
->mode
);
2864 nlookup_done_at(&nd
, fp
);
2869 setfown(struct vnode
*vp
, uid_t uid
, gid_t gid
)
2871 struct thread
*td
= curthread
;
2872 struct proc
*p
= td
->td_proc
;
2877 * note: vget is required for any operation that might mod the vnode
2878 * so VINACTIVE is properly cleared.
2880 if ((error
= vget(vp
, LK_EXCLUSIVE
)) == 0) {
2884 error
= VOP_SETATTR(vp
, &vattr
, p
->p_ucred
);
2891 kern_chown(struct nlookupdata
*nd
, int uid
, int gid
)
2896 if ((error
= nlookup(nd
)) != 0)
2898 if ((error
= cache_vref(&nd
->nl_nch
, nd
->nl_cred
, &vp
)) != 0)
2900 if ((error
= ncp_writechk(&nd
->nl_nch
)) == 0)
2901 error
= setfown(vp
, uid
, gid
);
2907 * chown(char *path, int uid, int gid)
2909 * Set ownership given a path name.
2912 sys_chown(struct chown_args
*uap
)
2914 struct nlookupdata nd
;
2917 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, NLC_FOLLOW
);
2919 error
= kern_chown(&nd
, uap
->uid
, uap
->gid
);
2925 * lchown_args(char *path, int uid, int gid)
2927 * Set ownership given a path name, do not cross symlinks.
2930 sys_lchown(struct lchown_args
*uap
)
2932 struct nlookupdata nd
;
2935 error
= nlookup_init(&nd
, uap
->path
, UIO_USERSPACE
, 0);
2937 error
= kern_chown(&nd
, uap
->uid
, uap
->gid
);
2943 * fchown_args(int fd, int uid, int gid)
2945 * Set ownership given a file descriptor.
2949 sys_fchown(struct fchown_args
*uap
)
2951 struct thread
*td
= curthread
;
2952 struct proc
*p
= td
->td_proc
;
2956 if ((error
= holdvnode(p
->p_fd
, uap
->fd
, &fp
)) != 0)
2958 if (fp
->f_nchandle
.ncp
)
2959 error
= ncp_writechk(&fp
->f_nchandle
);
2961 error
= setfown((struct vnode
*)fp
->f_data
, uap
->uid
, uap
->gid
);
2967 * fchownat(int fd, char *path, int uid, int gid, int flags)
2969 * Set ownership of file pointed to by fd/path.
2972 sys_fchownat(struct fchownat_args
*uap
)
2974 struct nlookupdata nd
;
2979 if (uap
->flags
& ~AT_SYMLINK_NOFOLLOW
)
2981 flags
= (uap
->flags
& AT_SYMLINK_NOFOLLOW
) ? 0 : NLC_FOLLOW
;
2983 error
= nlookup_init_at(&nd
, &fp
, uap
->fd
, uap
->path
,
2984 UIO_USERSPACE
, flags
);
2986 error
= kern_chown(&nd
, uap
->uid
, uap
->gid
);
2987 nlookup_done_at(&nd
, fp
);
getutimes(const struct timeval *tvp, struct timespec *tsp)
	struct timeval tv[2];

		TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
		TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
		TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
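/*
 * kern_utimes() and kern_futimes() call this with tvp == NULL when the
 * caller passed no timeval array, meaning "use the current time for
 * both timestamps"; setutimes() is then invoked with nullflag set so
 * VA_UTIMES_NULL is passed down to the filesystem.
 */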
3009 setutimes(struct vnode
*vp
, struct vattr
*vattr
,
3010 const struct timespec
*ts
, int nullflag
)
3012 struct thread
*td
= curthread
;
3013 struct proc
*p
= td
->td_proc
;
3017 vattr
->va_atime
= ts
[0];
3018 vattr
->va_mtime
= ts
[1];
3020 vattr
->va_vaflags
|= VA_UTIMES_NULL
;
3021 error
= VOP_SETATTR(vp
, vattr
, p
->p_ucred
);
int
kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
{
	struct timespec ts[2];
	struct vnode *vp;
	struct vattr vattr;
	int error;

	if ((error = getutimes(tptr, ts)) != 0)
		return (error);

	/*
	 * NOTE: utimes() succeeds for the owner even if the file
	 * is not user-writable.
	 */
	nd->nl_flags |= NLC_OWN | NLC_WRITE;

	if ((error = nlookup(nd)) != 0)
		return (error);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
		return (error);

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) {
		error = vget(vp, LK_EXCLUSIVE);
		if (error == 0) {
			error = setutimes(vp, &vattr, ts, (tptr == NULL));
			vput(vp);
		}
	}
	vrele(vp);
	return (error);
}

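/*
 * Illustrative sketch (not from the original file; assumes the standard
 * utimes(2) wrapper):
 *
 *	utimes("/tmp/f", NULL);
 *
 * reaches kern_utimes() with tptr == NULL, so getutimes() substitutes the
 * current time and setutimes() is called with nullflag set, which marks
 * VA_UTIMES_NULL.  Because NLC_OWN is or'd into nl_flags above, the file
 * owner may update the timestamps even when the file is not user-writable.
 */
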
/*
 * utimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
int
sys_utimes(struct utimes_args *uap)
{
	struct timeval tv[2];
	struct nlookupdata nd;
	int error;

	if (uap->tptr) {
		error = copyin(uap->tptr, tv, sizeof(tv));
		if (error)
			return (error);
	}
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_utimes(&nd, uap->tptr ? tv : NULL);
	nlookup_done(&nd);
	return (error);
}

/*
 * lutimes_args(char *path, struct timeval *tptr)
 *
 * Set the access and modification times of a file, do not cross symlinks.
 */
int
sys_lutimes(struct lutimes_args *uap)
{
	struct timeval tv[2];
	struct nlookupdata nd;
	int error;

	if (uap->tptr) {
		error = copyin(uap->tptr, tv, sizeof(tv));
		if (error)
			return (error);
	}
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0)
		error = kern_utimes(&nd, uap->tptr ? tv : NULL);
	nlookup_done(&nd);
	return (error);
}

/*
 * Set utimes on a file descriptor.  The creds used to open the
 * file are used to determine whether the operation is allowed
 * or not.
 */
int
kern_futimes(int fd, struct timeval *tptr)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts[2];
	struct file *fp;
	struct vnode *vp;
	struct vattr vattr;
	int error;

	error = getutimes(tptr, ts);
	if (error)
		return (error);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	if (error == 0) {
		vp = fp->f_data;
		error = vget(vp, LK_EXCLUSIVE);
		if (error == 0) {
			error = VOP_GETATTR(vp, &vattr);
			if (error == 0) {
				error = naccess_va(&vattr, NLC_OWN | NLC_WRITE,
						   fp->f_cred);
			}
			if (error == 0) {
				error = setutimes(vp, &vattr, ts,
						  (tptr == NULL));
			}
			vput(vp);
		}
	}
	fdrop(fp);
	return (error);
}

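/*
 * Illustrative note (not from the original file): the access check above
 * runs against fp->f_cred, the credentials in effect when the descriptor
 * was opened, not the caller's current credentials.  A process that opened
 * the file and later dropped privileges can therefore still issue
 *
 *	futimes(fd, NULL);
 *
 * on that descriptor, subject to the NLC_OWN | NLC_WRITE test.
 */
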
/*
 * futimes_args(int fd, struct timeval *tptr)
 *
 * Set the access and modification times of a file.
 */
int
sys_futimes(struct futimes_args *uap)
{
	struct timeval tv[2];
	int error;

	if (uap->tptr) {
		error = copyin(uap->tptr, tv, sizeof(tv));
		if (error)
			return (error);
	}
	error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);

	return (error);
}

int
kern_truncate(struct nlookupdata *nd, off_t length)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	if (length < 0)
		return (EINVAL);
	nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE;
	if ((error = nlookup(nd)) != 0)
		return (error);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
		return (error);
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) {
		vrele(vp);
		return (error);
	}
	if (vp->v_type == VDIR) {
		error = EISDIR;
	} else if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, nd->nl_cred);
	}
	vput(vp);
	return (error);
}

/*
 * truncate(char *path, int pad, off_t length)
 *
 * Truncate a file given its path name.
 */
int
sys_truncate(struct truncate_args *uap)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_truncate(&nd, uap->length);
	nlookup_done(&nd);
	return (error);
}

int
kern_ftruncate(int fd, off_t length)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (length < 0)
		return (EINVAL);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if (fp->f_nchandle.ncp) {
		error = ncp_writechk(&fp->f_nchandle);
		if (error)
			goto done;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EINVAL;
		goto done;
	}
	if (fp->f_flag & FAPPENDONLY) {	/* inode was set append-only */
		error = EINVAL;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
	} else if ((error = vn_writechk(vp, NULL)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
	vn_unlock(vp);
done:
	fdrop(fp);
	return (error);
}

/*
 * ftruncate_args(int fd, int pad, off_t length)
 *
 * Truncate a file given a file descriptor.
 */
int
sys_ftruncate(struct ftruncate_args *uap)
{
	int error;

	error = kern_ftruncate(uap->fd, uap->length);

	return (error);
}

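/*
 * Illustrative sketch (not from the original file; assumes the standard
 * libc wrappers):
 *
 *	truncate("/tmp/f", 0);		path-based, via kern_truncate()
 *	ftruncate(fd, 4096);		descriptor-based, requires FWRITE
 *					and rejects append-only files
 *
 * Both paths ultimately set va_size and call VOP_SETATTR().
 */
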
/*
 * Sync an open file.
 */
int
sys_fsync(struct fsync_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct file *fp;
	vm_object_t obj;
	int error;

	if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
		return (error);
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((obj = vp->v_object) != NULL)
		vm_object_page_clean(obj, 0, 0, 0);
	error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL);
	if (error == 0 && vp->v_mount)
		error = buf_fsync(vp);
	vn_unlock(vp);
	fdrop(fp);

	return (error);
}

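/*
 * Illustrative note (not from the original file): a typical caller does
 *
 *	write(fd, buf, len);
 *	fsync(fd);
 *
 * The implementation above first cleans any VM object pages backing the
 * vnode and then issues VOP_FSYNC(MNT_WAIT), so dirty mmap'd pages are
 * flushed along with dirty buffers.
 */
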
int
kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
{
	struct nchandle fnchd;
	struct nchandle tnchd;
	struct namecache *ncp;
	struct vnode *fdvp;
	struct vnode *tdvp;
	struct mount *mp;
	int error;

	fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC;
	if ((error = nlookup(fromnd)) != 0)
		return (error);
	if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL)
		return (ENOENT);
	fnchd.mount = fromnd->nl_nch.mount;
	cache_hold(&fnchd);

	/*
	 * unlock the source nch so we can lookup the target nch without
	 * deadlocking.  The target may or may not exist so we do not check
	 * for a target vp like kern_mkdir() and other creation functions do.
	 *
	 * The source and target directories are ref'd and rechecked after
	 * everything is relocked to determine if the source or target file
	 * has been renamed out from under us.
	 */
	KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
	fromnd->nl_flags &= ~NLC_NCPISLOCKED;
	cache_unlock(&fromnd->nl_nch);

	tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP;
	if ((error = nlookup(tond)) != 0) {
		cache_drop(&fnchd);
		return (error);
	}
	if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) {
		cache_drop(&fnchd);
		return (ENOENT);
	}
	tnchd.mount = tond->nl_nch.mount;
	cache_hold(&tnchd);

	/*
	 * If the source and target are the same there is nothing to do
	 */
	if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (0);
	}

	/*
	 * Mount points cannot be renamed or overwritten
	 */
	if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) &
	    NCF_ISMOUNTPT) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (EINVAL);
	}

	/*
	 * relock the source ncp.  NOTE AFTER RELOCKING: the source ncp
	 * may have become invalid while it was unlocked, nc_vp and nc_mount
	 * may be NULL.
	 */
	if (cache_lock_nonblock(&fromnd->nl_nch) == 0) {
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
	} else if (fromnd->nl_nch.ncp > tond->nl_nch.ncp) {
		cache_lock(&fromnd->nl_nch);
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
	} else {
		cache_unlock(&tond->nl_nch);
		cache_lock(&fromnd->nl_nch);
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
		cache_lock(&tond->nl_nch);
		cache_resolve(&tond->nl_nch, tond->nl_cred);
	}
	fromnd->nl_flags |= NLC_NCPISLOCKED;

	/*
	 * make sure the parent directories linkages are the same
	 */
	if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent ||
	    tnchd.ncp != tond->nl_nch.ncp->nc_parent) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (ENOENT);
	}

	/*
	 * Both the source and target must be within the same filesystem and
	 * in the same filesystem as their parent directories within the
	 * namecache topology.
	 *
	 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
	 */
	mp = fnchd.mount;
	if (mp != tnchd.mount || mp != fromnd->nl_nch.mount ||
	    mp != tond->nl_nch.mount) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (EXDEV);
	}

	/*
	 * Make sure the mount point is writable
	 */
	if ((error = ncp_writechk(&tond->nl_nch)) != 0) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (error);
	}

	/*
	 * If the target exists and either the source or target is a directory,
	 * then both must be directories.
	 *
	 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might
	 * have become NULL.
	 */
	if (tond->nl_nch.ncp->nc_vp) {
		if (fromnd->nl_nch.ncp->nc_vp == NULL) {
			error = ENOENT;
		} else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) {
			if (tond->nl_nch.ncp->nc_vp->v_type != VDIR)
				error = ENOTDIR;
		} else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) {
			error = EISDIR;
		}
	}

	/*
	 * You cannot rename a source into itself or a subdirectory of itself.
	 * We check this by traversing the target directory upwards looking
	 * for a match against the source.
	 */
	if (error == 0) {
		for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) {
			if (fromnd->nl_nch.ncp == ncp) {
				error = EINVAL;
				break;
			}
		}
	}

	cache_drop(&fnchd);
	cache_drop(&tnchd);

	/*
	 * Even though the namespaces are different, they may still represent
	 * hardlinks to the same file.  The filesystem might have a hard time
	 * with this so we issue a NREMOVE of the source instead of a NRENAME
	 * when we detect the situation.
	 */
	if (error == 0) {
		fdvp = fromnd->nl_dvp;
		tdvp = tond->nl_dvp;
		if (fdvp == NULL || tdvp == NULL) {
			error = EPERM;
		} else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) {
			error = VOP_NREMOVE(&fromnd->nl_nch, fdvp,
					    fromnd->nl_cred);
		} else {
			error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch,
					    fdvp, tdvp, tond->nl_cred);
		}
	}
	return (error);
}

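/*
 * Illustrative sketch (not from the original file):
 *
 *	rename("/a/b", "/a/b/c");	fails: the upward walk from the
 *					target's parent finds the source,
 *					so a directory cannot be moved
 *					into itself
 *	rename("link1", "link2");	if both names resolve to the same
 *					vnode, the code above issues
 *					VOP_NREMOVE on the source instead
 *					of a rename
 */
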
/*
 * rename_args(char *from, char *to)
 *
 * Rename files.  Source and destination must either both be directories,
 * or both not be directories.  If target is a directory, it must be empty.
 */
int
sys_rename(struct rename_args *uap)
{
	struct nlookupdata fromnd, tond;
	int error;

	error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
	if (error == 0) {
		error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
		if (error == 0)
			error = kern_rename(&fromnd, &tond);
		nlookup_done(&tond);
	}
	nlookup_done(&fromnd);
	return (error);
}

int
kern_mkdir(struct nlookupdata *nd, int mode)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct vattr vattr;
	int error;

	nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
		return (error);

	if (nd->nl_nch.ncp->nc_vp)
		return (EEXIST);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	VATTR_NULL(&vattr);
	vattr.va_type = VDIR;
	vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;

	vp = NULL;
	error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, p->p_ucred, &vattr);
	if (error == 0)
		vput(vp);
	return (error);
}

/*
 * mkdir_args(char *path, int mode)
 *
 * Make a directory file.
 */
int
sys_mkdir(struct mkdir_args *uap)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0)
		error = kern_mkdir(&nd, uap->mode);
	nlookup_done(&nd);
	return (error);
}

int
kern_rmdir(struct nlookupdata *nd)
{
	int error;

	nd->nl_flags |= NLC_DELETE | NLC_REFDVP;
	if ((error = nlookup(nd)) != 0)
		return (error);

	/*
	 * Do not allow directories representing mount points to be
	 * deleted, even if empty.  Check write perms on mount point
	 * in case the vnode is aliased (aka nullfs).
	 */
	if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT))
		return (EINVAL);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred);
	return (error);
}

/*
 * rmdir_args(char *path)
 *
 * Remove a directory file.
 */
int
sys_rmdir(struct rmdir_args *uap)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0)
		error = kern_rmdir(&nd);
	nlookup_done(&nd);
	return (error);
}

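/*
 * Illustrative note (not from the original file): because kern_rmdir()
 * refuses namecache entries flagged NCF_ISMOUNTPT, something like
 *
 *	rmdir("/mnt/cd");		while /mnt/cd is a mount point
 *
 * fails even if the directory is empty; the filesystem must be unmounted
 * first.
 */
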
int
kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
		   enum uio_seg direction)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	off_t loff;
	int error, eofflag;

	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
unionread:
	if (vp->v_type != VDIR) {
		error = EINVAL;
		goto done;
	}
	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = direction;
	auio.uio_td = td;
	auio.uio_resid = count;
	loff = auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
	fp->f_offset = auio.uio_offset;
	if (error)
		goto done;

	if (count == auio.uio_resid) {
		if (union_dircheckp) {
			error = union_dircheckp(td, &vp, fp);
			if (error == -1)
				goto unionread;
			if (error)
				goto done;
		}
		if ((vp->v_flag & VROOT) &&
		    (vp->v_mount->mnt_flag & MNT_UNION)) {
			struct vnode *tvp = vp;

			vp = vp->v_mount->mnt_vnodecovered;
			vref(vp);
			fp->f_data = vp;
			fp->f_offset = 0;
			vrele(tvp);
			goto unionread;
		}
	}

	/*
	 * WARNING!  *basep may not be wide enough to accommodate the
	 * seek offset.   XXX should we hack this to return the upper 32 bits
	 * for offsets greater than 4G?
	 */
	if (basep) {
		*basep = (long)loff;
	}
	*res = count - auio.uio_resid;
done:
	fdrop(fp);
	return (error);
}

/*
 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
 *
 * Read a block of directory entries in a file system independent format.
 */
int
sys_getdirentries(struct getdirentries_args *uap)
{
	long base;
	int error;

	error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
				   &uap->sysmsg_result, UIO_USERSPACE);

	if (error == 0 && uap->basep)
		error = copyout(&base, uap->basep, sizeof(*uap->basep));
	return (error);
}

/*
 * getdents_args(int fd, char *buf, size_t count)
 */
int
sys_getdents(struct getdents_args *uap)
{
	int error;

	error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
				   &uap->sysmsg_result, UIO_USERSPACE);

	return (error);
}

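/*
 * Illustrative sketch (not from the original file; assumes the standard
 * libc wrappers):
 *
 *	long base;
 *	char buf[4096];
 *	int n = getdirentries(fd, buf, sizeof(buf), &base);
 *
 * getdents() performs the same operation minus the basep out-parameter.
 * As the WARNING above notes, the long-sized *basep can truncate directory
 * seek offsets beyond 4G when long is 32 bits.
 */
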
/*
 * umask(int newmask)
 *
 * Set the mode mask for creation of filesystem nodes.
 */
int
sys_umask(struct umask_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	fdp = p->p_fd;
	uap->sysmsg_result = fdp->fd_cmask;
	fdp->fd_cmask = uap->newmask & ALLPERMS;
	return (0);
}

/*
 * revoke(char *path)
 *
 * Void all references to file by ripping underlying filesystem
 * away from vnode.
 */
int
sys_revoke(struct revoke_args *uap)
{
	struct nlookupdata nd;
	struct vattr vattr;
	struct vnode *vp;
	struct ucred *cred;
	int error;

	vp = NULL;
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	cred = crhold(nd.nl_cred);
	nlookup_done(&nd);
	if (error == 0) {
		error = VOP_GETATTR(vp, &vattr);
		if (error == 0 && cred->cr_uid != vattr.va_uid)
			error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0);
		if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) {
			if (vcount(vp) > 0)
				error = vrevoke(vp, cred);
		} else if (error == 0) {
			error = vrevoke(vp, cred);
		}
		vrele(vp);
	}
	if (cred)
		crfree(cred);
	return (error);
}

/*
 * getfh_args(char *fname, fhandle_t *fhp)
 *
 * Get (NFS) file handle
 *
 * NOTE: We use the fsid of the covering mount, even if it is a nullfs
 * mount.  This allows nullfs mounts to be explicitly exported.
 *
 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe.
 *
 *	    nullfs mounts of subdirectories are not safe.  That is, it will
 *	    work, but you do not really have protection against access to
 *	    the related parent directories.
 */
int
sys_getfh(struct getfh_args *uap)
{
	struct thread *td = curthread;
	struct nlookupdata nd;
	fhandle_t fh;
	struct vnode *vp;
	struct mount *mp;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)) != 0)
		return (error);

	vp = NULL;
	error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	mp = nd.nl_nch.mount;
	nlookup_done(&nd);
	if (error == 0) {
		bzero(&fh, sizeof(fh));
		fh.fh_fsid = mp->mnt_stat.f_fsid;
		error = VFS_VPTOFH(vp, &fh.fh_fid);
		vput(vp);
		if (error == 0)
			error = copyout(&fh, uap->fhp, sizeof(fh));
	}
	return (error);
}

/*
 * fhopen_args(const struct fhandle *u_fhp, int flags)
 *
 * syscall for the rpc.lockd to use to translate a NFS file handle into
 * an open descriptor.
 *
 * warning: do not remove the priv_check() call or this becomes one giant
 * security hole.
 */
int
sys_fhopen(struct fhopen_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp;
	struct vnode *vp;
	struct fhandle fhp;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct flock lf;
	int fmode, mode, error, type;
	struct file *nfp;
	struct file *fp;
	int indx;

	/*
	 * Must be super user
	 */
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);

	fmode = FFLAGS(uap->flags);
	/* why not allow a non-read/write open for our lockd? */
	if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
		return (EINVAL);
	error = copyin(uap->u_fhp, &fhp, sizeof(fhp));
	if (error)
		return (error);

	/* find the mount point */
	mp = vfs_getvfs(&fhp.fh_fsid);
	if (mp == NULL)
		return (ESTALE);

	/* now give me my vnode, it gets returned to me locked */
	error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp);
	if (error)
		return (error);

	/*
	 * from now on we have to make sure not
	 * to forget about the vnode
	 * any error that causes an abort must vput(vp)
	 * just set error = err and 'goto bad;'.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp, NULL);
		if (error)
			goto bad;
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (mode) {
		error = VOP_ACCESS(vp, mode, p->p_ucred);
		if (error)
			goto bad;
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, p->p_ucred);
		if (error)
			goto bad;
	}

	/*
	 * VOP_OPEN needs the file pointer so it can potentially override
	 * it.
	 *
	 * WARNING! no f_nchandle will be associated when fhopen()ing a
	 * directory.
	 */
	if ((error = falloc(p, &nfp, &indx)) != 0)
		goto bad;
	fp = nfp;

	error = VOP_OPEN(vp, fmode, p->p_ucred, fp);
	if (error) {
		/*
		 * setting f_ops this way prevents VOP_CLOSE from being
		 * called or fdrop() releasing the vp from v_data.   Since
		 * the VOP_OPEN failed we don't want to VOP_CLOSE.
		 */
		fp->f_ops = &badfileops;
		goto bad_drop;
	}

	/*
	 * The fp is given its own reference, we still have our ref and lock.
	 *
	 * Assert that all regular files must be created with a VM object.
	 */
	if (vp->v_type == VREG && vp->v_object == NULL) {
		kprintf("fhopen: regular file did not have VM object: %p\n", vp);
		goto bad_drop;
	}

	/*
	 * The open was successful.  Handle any locking requirements.
	 */
	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (fmode & FNONBLOCK)
			type = 0;
		else
			type = F_WAIT;
		vn_unlock(vp);
		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * release our private reference.
			 */
			fsetfd(p, NULL, indx);
			fdrop(fp);
			vrele(vp);
			return (error);
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		fp->f_flag |= FHASLOCK;
	}

	/*
	 * Clean up.  Associate the file pointer with the previously
	 * reserved descriptor and return it.
	 */
	vput(vp);
	fsetfd(p, fp, indx);
	fdrop(fp);
	uap->sysmsg_result = indx;
	return (0);

bad_drop:
	fsetfd(p, NULL, indx);
	fdrop(fp);
bad:
	vput(vp);
	return (error);
}

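/*
 * Illustrative sketch (not from the original file): getfh()/fhopen() are
 * intended for privileged file-handle consumers such as rpc.lockd, e.g.
 *
 *	fhandle_t fh;
 *	getfh("/export/some/file", &fh);	root only
 *	int fd = fhopen(&fh, O_RDWR);		root only, no O_CREAT
 *
 * Both calls are gated by priv_check(td, PRIV_ROOT); without that check a
 * file handle would bypass the normal path-based permission checks.
 */
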
/*
 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
 */
int
sys_fhstat(struct fhstat_args *uap)
{
	struct thread *td = curthread;
	struct stat sb;
	fhandle_t fh;
	struct mount *mp;
	struct vnode *vp;
	int error;

	/*
	 * Must be super user
	 */
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);

	error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
	if (error)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);
	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))
		return (error);
	error = vn_stat(vp, &sb, td->td_proc->p_ucred);
	vput(vp);
	if (error == 0)
		error = copyout(&sb, uap->sb, sizeof(sb));
	return (error);
}

/*
 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
 */
int
sys_fhstatfs(struct fhstatfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct statfs *sp;
	struct mount *mp;
	struct vnode *vp;
	struct statfs sb;
	char *fullpath, *freepath;
	fhandle_t fh;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)))
		return (error);

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);

	if (p != NULL && !chroot_visible_mnt(mp, p))
		return (ESTALE);

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))
		return (error);
	mp = vp->v_mount;
	sp = &mp->mnt_stat;
	vput(vp);
	if ((error = VFS_STATFS(mp, sp, p->p_ucred)) != 0)
		return (error);

	error = mount_path(p, mp, &fullpath, &freepath);
	if (error)
		return (error);
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	if (priv_check(td, PRIV_ROOT)) {
		bcopy(sp, &sb, sizeof(sb));
		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
		sp = &sb;
	}
	return (copyout(sp, uap->buf, sizeof(*sp)));
}

/*
 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf)
 */
int
sys_fhstatvfs(struct fhstatvfs_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct statvfs *sp;
	struct mount *mp;
	struct vnode *vp;
	fhandle_t fh;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = priv_check(td, PRIV_ROOT)))
		return (error);

	if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
		return (error);

	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);

	if (p != NULL && !chroot_visible_mnt(mp, p))
		return (ESTALE);

	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))
		return (error);
	mp = vp->v_mount;
	sp = &mp->mnt_vstat;
	vput(vp);
	if ((error = VFS_STATVFS(mp, sp, p->p_ucred)) != 0)
		return (error);

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;

	return (copyout(sp, uap->buf, sizeof(*sp)));
}

/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), and attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed to be copied into sysspace by the
 * syscall in the future, once issues with various consumers of the
 * attribute code have raised their hands.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;
	struct mount *mp;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0) {
		mp = nd.nl_nch.mount;
		error = VFS_EXTATTRCTL(mp, uap->cmd,
				uap->attrname, uap->arg,
				nd.nl_cred);
	}
	nlookup_done(&nd);
	return (error);
}

/*
 * Syscall to set a named extended attribute on a file or directory.
 * Accepts attribute name, and a uio structure pointing to the data to set.
 * The uio is consumed in the style of writev().  The real work happens
 * in VOP_SETEXTATTR().
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct iovec aiov[UIO_SMALLIOV];
	struct iovec *needfree;
	struct nlookupdata nd;
	struct iovec *iov;
	struct vnode *vp;
	struct uio auio;
	u_int iovlen;
	u_int cnt;
	int error;
	int i;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	needfree = NULL;
	iovlen = uap->iovcnt * sizeof(struct iovec);
	if (uap->iovcnt > UIO_SMALLIOV) {
		if (uap->iovcnt > UIO_MAXIOV) {
			error = EINVAL;
			goto done;
		}
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else {
		iov = aiov;
	}
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = nd.nl_td;
	auio.uio_offset = 0;
	if ((error = copyin(uap->iovp, iov, iovlen)))
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < uap->iovcnt; i++) {
		if (iov->iov_len > LONG_MAX - auio.uio_resid) {
			error = EINVAL;
			goto done;
		}
		auio.uio_resid += iov->iov_len;
		iov++;
	}
	cnt = auio.uio_resid;
	error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred);
	cnt -= auio.uio_resid;
	uap->sysmsg_result = cnt;
done:
	vput(vp);
	nlookup_done(&nd);
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

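/*
 * Illustrative sketch (not from the original file; the argument layout is
 * inferred from the extattr_set_file_args fields used above):
 *
 *	struct iovec iov = { .iov_base = value, .iov_len = valuelen };
 *	extattr_set_file(path, attrname, &iov, 1);
 *
 * The iovec array is gathered into a UIO_WRITE uio and consumed by
 * VOP_SETEXTATTR() much like writev() consumes its iovecs.
 */
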
/*
 * Syscall to get a named extended attribute on a file or directory.
 * Accepts attribute name, and a uio structure pointing to a buffer for the
 * data.  The uio is consumed in the style of readv().  The real work
 * happens in VOP_GETEXTATTR().
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct iovec aiov[UIO_SMALLIOV];
	struct iovec *needfree;
	struct nlookupdata nd;
	struct iovec *iov;
	struct vnode *vp;
	struct uio auio;
	u_int iovlen;
	u_int cnt;
	int error;
	int i;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	needfree = NULL;
	iovlen = uap->iovcnt * sizeof (struct iovec);
	if (uap->iovcnt > UIO_SMALLIOV) {
		if (uap->iovcnt > UIO_MAXIOV) {
			error = EINVAL;
			goto done;
		}
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else {
		iov = aiov;
	}
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = nd.nl_td;
	auio.uio_offset = 0;
	if ((error = copyin(uap->iovp, iov, iovlen)))
		goto done;
	auio.uio_resid = 0;
	for (i = 0; i < uap->iovcnt; i++) {
		if (iov->iov_len > LONG_MAX - auio.uio_resid) {
			error = EINVAL;
			goto done;
		}
		auio.uio_resid += iov->iov_len;
		iov++;
	}
	cnt = auio.uio_resid;
	error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred);
	cnt -= auio.uio_resid;
	uap->sysmsg_result = cnt;
done:
	vput(vp);
	nlookup_done(&nd);
	if (needfree)
		FREE(needfree, M_IOV);
	return (error);
}

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred);
	vput(vp);
	nlookup_done(&nd);
	return (error);
}

/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;
	while (nch.ncp) {
		if (nch.mount == p->p_fd->fd_nrdir.mount &&
		    nch.ncp == p->p_fd->fd_nrdir.ncp) {
			return (1);
		}
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		} else {
			nch.ncp = nch.ncp->nc_parent;
		}
	}

	/*
	 * If the mount point is not visible to the process, but the
	 * process root is in a subdirectory of the mount, return
	 * TRUE anyway.
	 */
	if (p->p_fd->fd_nrdir.mount == mp)
		return (1);
	return (0);
}