1 /*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.120 2007/09/03 17:06:21 dillon Exp $
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/buf.h>
46 #include <sys/conf.h>
47 #include <sys/sysent.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/mountctl.h>
51 #include <sys/sysproto.h>
52 #include <sys/filedesc.h>
53 #include <sys/kernel.h>
54 #include <sys/fcntl.h>
55 #include <sys/file.h>
56 #include <sys/linker.h>
57 #include <sys/stat.h>
58 #include <sys/unistd.h>
59 #include <sys/vnode.h>
60 #include <sys/proc.h>
61 #include <sys/namei.h>
62 #include <sys/nlookup.h>
63 #include <sys/dirent.h>
64 #include <sys/extattr.h>
65 #include <sys/spinlock.h>
66 #include <sys/kern_syscall.h>
67 #include <sys/objcache.h>
68 #include <sys/sysctl.h>
69 #include <sys/file2.h>
70 #include <sys/spinlock2.h>
72 #include <vm/vm.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_page.h>
76 #include <machine/limits.h>
77 #include <machine/stdarg.h>
79 #include <vfs/union/union.h>
81 static void mount_warning(struct mount *mp, const char *ctl, ...);
82 static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb);
83 static int checkvp_chdir (struct vnode *vn, struct thread *td);
84 static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch);
85 static int chroot_refuse_vdir_fds (struct filedesc *fdp);
86 static int chroot_visible_mnt(struct mount *mp, struct proc *p);
87 static int getutimes (const struct timeval *, struct timespec *);
88 static int setfown (struct vnode *, uid_t, gid_t);
89 static int setfmode (struct vnode *, int);
90 static int setfflags (struct vnode *, int);
91 static int setutimes (struct vnode *, const struct timespec *, int);
92 static int usermount = 0; /* if 1, non-root can mount fs. */
94 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
96 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
99 * Virtual File System System Calls
103 * Mount a file system.
106 * mount_args(char *type, char *path, int flags, caddr_t data)
108 /* ARGSUSED */
110 sys_mount(struct mount_args *uap)
112 struct thread *td = curthread;
113 struct proc *p = td->td_proc;
114 struct vnode *vp;
115 struct nchandle nch;
116 struct mount *mp;
117 struct vfsconf *vfsp;
118 int error, flag = 0, flag2 = 0;
119 int hasmount;
120 struct vattr va;
121 struct nlookupdata nd;
122 char fstypename[MFSNAMELEN];
123 struct ucred *cred = p->p_ucred;
125 KKASSERT(p);
126 if (cred->cr_prison != NULL)
127 return (EPERM);
128 if (usermount == 0 && (error = suser(td)))
129 return (error);
131 * Do not allow NFS export by non-root users.
133 if (uap->flags & MNT_EXPORTED) {
134 error = suser(td);
135 if (error)
136 return (error);
139 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
141 if (suser(td))
142 uap->flags |= MNT_NOSUID | MNT_NODEV;
145 * Lookup the requested path and extract the nch and vnode.
147 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
148 if (error == 0) {
149 if ((error = nlookup(&nd)) == 0) {
150 if (nd.nl_nch.ncp->nc_vp == NULL)
151 error = ENOENT;
154 if (error) {
155 nlookup_done(&nd);
156 return (error);
160 * Extract the locked+refd ncp and cleanup the nd structure
162 nch = nd.nl_nch;
163 cache_zero(&nd.nl_nch);
164 nlookup_done(&nd);
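/*
 * Record whether another filesystem is already mounted on this
 * namecache entry; the EBUSY checks below use this to reject
 * conflicting mounts.
 */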
166 if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && cache_findmount(&nch))
167 hasmount = 1;
168 else
169 hasmount = 0;
173 * now we have the locked ref'd nch and unreferenced vnode.
175 vp = nch.ncp->nc_vp;
176 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {
177 cache_put(&nch);
178 return (error);
180 cache_unlock(&nch);
183 * Now we have an unlocked ref'd nch and a locked ref'd vp
185 if (uap->flags & MNT_UPDATE) {
186 if ((vp->v_flag & VROOT) == 0) {
187 cache_drop(&nch);
188 vput(vp);
189 return (EINVAL);
191 mp = vp->v_mount;
192 flag = mp->mnt_flag;
193 flag2 = mp->mnt_kern_flag;
195 * We only allow the filesystem to be reloaded if it
196 * is currently mounted read-only.
198 if ((uap->flags & MNT_RELOAD) &&
199 ((mp->mnt_flag & MNT_RDONLY) == 0)) {
200 cache_drop(&nch);
201 vput(vp);
202 return (EOPNOTSUPP); /* Needs translation */
205 * Only root, or the user that did the original mount is
206 * permitted to update it.
208 if (mp->mnt_stat.f_owner != cred->cr_uid &&
209 (error = suser(td))) {
210 cache_drop(&nch);
211 vput(vp);
212 return (error);
214 if (vfs_busy(mp, LK_NOWAIT)) {
215 cache_drop(&nch);
216 vput(vp);
217 return (EBUSY);
219 if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
220 cache_drop(&nch);
221 vfs_unbusy(mp);
222 vput(vp);
223 return (EBUSY);
225 vp->v_flag |= VMOUNT;
226 mp->mnt_flag |=
227 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
228 vn_unlock(vp);
229 goto update;
232 * If the user is not root, ensure that they own the directory
233 * onto which we are attempting to mount.
235 if ((error = VOP_GETATTR(vp, &va)) ||
236 (va.va_uid != cred->cr_uid && (error = suser(td)))) {
237 cache_drop(&nch);
238 vput(vp);
239 return (error);
241 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
242 cache_drop(&nch);
243 vput(vp);
244 return (error);
246 if (vp->v_type != VDIR) {
247 cache_drop(&nch);
248 vput(vp);
249 return (ENOTDIR);
251 if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) {
252 cache_drop(&nch);
253 vput(vp);
254 return (EPERM);
256 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {
257 cache_drop(&nch);
258 vput(vp);
259 return (error);
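/*
 * Locate the requested filesystem type among the configured VFSs; if it
 * is not found, try to load it as a kernel module below (root only).
 */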
261 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
262 if (!strcmp(vfsp->vfc_name, fstypename))
263 break;
265 if (vfsp == NULL) {
266 linker_file_t lf;
268 /* Only load modules for root (very important!) */
269 if ((error = suser(td)) != 0) {
270 cache_drop(&nch);
271 vput(vp);
272 return error;
274 error = linker_load_file(fstypename, &lf);
275 if (error || lf == NULL) {
276 cache_drop(&nch);
277 vput(vp);
278 if (lf == NULL)
279 error = ENODEV;
280 return error;
282 lf->userrefs++;
283 /* lookup again, see if the VFS was loaded */
284 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
285 if (!strcmp(vfsp->vfc_name, fstypename))
286 break;
288 if (vfsp == NULL) {
289 lf->userrefs--;
290 linker_file_unload(lf);
291 cache_drop(&nch);
292 vput(vp);
293 return (ENODEV);
296 if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
297 cache_drop(&nch);
298 vput(vp);
299 return (EBUSY);
301 vp->v_flag |= VMOUNT;
304 * Allocate and initialize the filesystem.
306 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
307 TAILQ_INIT(&mp->mnt_nvnodelist);
308 TAILQ_INIT(&mp->mnt_reservedvnlist);
309 TAILQ_INIT(&mp->mnt_jlist);
310 mp->mnt_nvnodelistsize = 0;
311 lockinit(&mp->mnt_lock, "vfslock", 0, 0);
312 vfs_busy(mp, LK_NOWAIT);
313 mp->mnt_op = vfsp->vfc_vfsops;
314 mp->mnt_vfc = vfsp;
315 vfsp->vfc_refcount++;
316 mp->mnt_stat.f_type = vfsp->vfc_typenum;
317 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
318 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
319 mp->mnt_stat.f_owner = cred->cr_uid;
320 mp->mnt_iosize_max = DFLTPHYS;
321 vn_unlock(vp);
322 update:
324 * Set the mount level flags.
326 if (uap->flags & MNT_RDONLY)
327 mp->mnt_flag |= MNT_RDONLY;
328 else if (mp->mnt_flag & MNT_RDONLY)
329 mp->mnt_kern_flag |= MNTK_WANTRDWR;
330 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
331 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
332 MNT_NOSYMFOLLOW | MNT_IGNORE |
333 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
334 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
335 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
336 MNT_NOSYMFOLLOW | MNT_IGNORE |
337 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
339 * Mount the filesystem.
340 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
341 * get.
343 error = VFS_MOUNT(mp, uap->path, uap->data, cred);
344 if (mp->mnt_flag & MNT_UPDATE) {
345 if (mp->mnt_kern_flag & MNTK_WANTRDWR)
346 mp->mnt_flag &= ~MNT_RDONLY;
347 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
348 mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
349 if (error) {
350 mp->mnt_flag = flag;
351 mp->mnt_kern_flag = flag2;
353 vfs_unbusy(mp);
354 vp->v_flag &= ~VMOUNT;
355 vrele(vp);
356 cache_drop(&nch);
357 return (error);
359 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
361 * Put the new filesystem on the mount list after root. The mount
362 * point gets its own mnt_ncmountpt (unless the VFS already set one
363 * up) which represents the root of the mount. The lookup code
364 * detects the mount point going forward and checks the root of
365 * the mount going backwards.
367 * It is not necessary to invalidate or purge the vnode underneath
368 * because elements under the mount will be given their own glue
369 * namecache record.
371 if (!error) {
372 if (mp->mnt_ncmountpt.ncp == NULL) {
374 * allocate, then unlock, but leave the ref intact
376 cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
377 cache_unlock(&mp->mnt_ncmountpt);
379 mp->mnt_ncmounton = nch; /* inherits ref */
380 nch.ncp->nc_flag |= NCF_ISMOUNTPT;
382 /* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
383 vp->v_flag &= ~VMOUNT;
384 mountlist_insert(mp, MNTINS_LAST);
385 checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt);
386 vn_unlock(vp);
387 error = vfs_allocate_syncvnode(mp);
388 vfs_unbusy(mp);
389 error = VFS_START(mp, 0);
390 vrele(vp);
391 } else {
392 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
393 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
394 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
395 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
396 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
397 vp->v_flag &= ~VMOUNT;
398 mp->mnt_vfc->vfc_refcount--;
399 vfs_unbusy(mp);
400 kfree(mp, M_MOUNT);
401 cache_drop(&nch);
402 vput(vp);
404 return (error);
408 * Scan all active processes to see if any of them have a current
409 * or root directory onto which the new filesystem has just been
410 * mounted. If so, replace them with the new mount point.
412 * The passed ncp is ref'd and locked (from the mount code) and
413 * must be associated with the vnode representing the root of the
414 * mount point.
416 struct checkdirs_info {
417 struct nchandle old_nch;
418 struct nchandle new_nch;
419 struct vnode *old_vp;
420 struct vnode *new_vp;
423 static int checkdirs_callback(struct proc *p, void *data);
425 static void
426 checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
428 struct checkdirs_info info;
429 struct vnode *olddp;
430 struct vnode *newdp;
431 struct mount *mp;
434 * If the old mount point's vnode has a usecount of 1, it is not
435 * being held as a descriptor anywhere.
437 olddp = old_nch->ncp->nc_vp;
438 if (olddp == NULL || olddp->v_sysref.refcnt == 1)
439 return;
442 * Force the root vnode of the new mount point to be resolved
443 * so we can update any matching processes.
445 mp = new_nch->mount;
446 if (VFS_ROOT(mp, &newdp))
447 panic("mount: lost mount");
448 cache_setunresolved(new_nch);
449 cache_setvp(new_nch, newdp);
452 * Special handling of the root node
454 if (rootvnode == olddp) {
455 vref(newdp);
456 vfs_cache_setroot(newdp, cache_hold(new_nch));
460 * Pass newdp separately so the callback does not have to access
461 * it via new_nch->ncp->nc_vp.
463 info.old_nch = *old_nch;
464 info.new_nch = *new_nch;
465 info.new_vp = newdp;
466 allproc_scan(checkdirs_callback, &info);
467 vput(newdp);
471 * NOTE: callback is not MP safe because the scanned process's filedesc
472 * structure can be ripped out from under us, among other things.
474 static int
475 checkdirs_callback(struct proc *p, void *data)
477 struct checkdirs_info *info = data;
478 struct filedesc *fdp;
479 struct nchandle ncdrop1;
480 struct nchandle ncdrop2;
481 struct vnode *vprele1;
482 struct vnode *vprele2;
484 if ((fdp = p->p_fd) != NULL) {
485 cache_zero(&ncdrop1);
486 cache_zero(&ncdrop2);
487 vprele1 = NULL;
488 vprele2 = NULL;
491 * MPUNSAFE - XXX fdp can be pulled out from under a
492 * foreign process.
494 * A shared filedesc is ok, we don't have to copy it
495 * because we are making this change globally.
497 spin_lock_wr(&fdp->fd_spin);
498 if (fdp->fd_ncdir.mount == info->old_nch.mount &&
499 fdp->fd_ncdir.ncp == info->old_nch.ncp) {
500 vprele1 = fdp->fd_cdir;
501 vref(info->new_vp);
502 fdp->fd_cdir = info->new_vp;
503 ncdrop1 = fdp->fd_ncdir;
504 cache_copy(&info->new_nch, &fdp->fd_ncdir);
506 if (fdp->fd_nrdir.mount == info->old_nch.mount &&
507 fdp->fd_nrdir.ncp == info->old_nch.ncp) {
508 vprele2 = fdp->fd_rdir;
509 vref(info->new_vp);
510 fdp->fd_rdir = info->new_vp;
511 ncdrop2 = fdp->fd_nrdir;
512 cache_copy(&info->new_nch, &fdp->fd_nrdir);
514 spin_unlock_wr(&fdp->fd_spin);
515 if (ncdrop1.ncp)
516 cache_drop(&ncdrop1);
517 if (ncdrop2.ncp)
518 cache_drop(&ncdrop2);
519 if (vprele1)
520 vrele(vprele1);
521 if (vprele2)
522 vrele(vprele2);
524 return(0);
528 * Unmount a file system.
530 * Note: unmount takes a path to the vnode mounted on as argument,
531 * not special file (as before).
534 * umount_args(char *path, int flags)
536 /* ARGSUSED */
538 sys_unmount(struct unmount_args *uap)
540 struct thread *td = curthread;
541 struct proc *p = td->td_proc;
542 struct mount *mp = NULL;
543 int error;
544 struct nlookupdata nd;
546 KKASSERT(p);
547 if (p->p_ucred->cr_prison != NULL)
548 return (EPERM);
549 if (usermount == 0 && (error = suser(td)))
550 return (error);
552 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
553 if (error == 0)
554 error = nlookup(&nd);
555 if (error)
556 goto out;
558 mp = nd.nl_nch.mount;
561 * Only root, or the user that did the original mount is
562 * permitted to unmount this filesystem.
564 if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
565 (error = suser(td)))
566 goto out;
569 * Don't allow unmounting the root file system.
571 if (mp->mnt_flag & MNT_ROOTFS) {
572 error = EINVAL;
573 goto out;
577 * Must be the root of the filesystem
579 if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {
580 error = EINVAL;
581 goto out;
584 out:
585 nlookup_done(&nd);
586 if (error)
587 return (error);
588 return (dounmount(mp, uap->flags));
592 * Do the actual file system unmount.
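/*
 * Interlock callback for mountlist_interlock(): fail with EBUSY if an
 * unmount is already in progress, otherwise flag the mount as being
 * unmounted.
 */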
594 static int
595 dounmount_interlock(struct mount *mp)
597 if (mp->mnt_kern_flag & MNTK_UNMOUNT)
598 return (EBUSY);
599 mp->mnt_kern_flag |= MNTK_UNMOUNT;
600 return(0);
604 dounmount(struct mount *mp, int flags)
606 struct namecache *ncp;
607 struct nchandle nch;
608 int error;
609 int async_flag;
610 int lflags;
611 int freeok = 1;
614 * Exclusive access for unmounting purposes
616 if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)
617 return (error);
620 * Allow filesystems to detect that a forced unmount is in progress.
622 if (flags & MNT_FORCE)
623 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
624 lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
625 error = lockmgr(&mp->mnt_lock, lflags);
626 if (error) {
627 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
628 if (mp->mnt_kern_flag & MNTK_MWAIT)
629 wakeup(mp);
630 return (error);
633 if (mp->mnt_flag & MNT_EXPUBLIC)
634 vfs_setpublicfs(NULL, NULL, NULL);
636 vfs_msync(mp, MNT_WAIT);
637 async_flag = mp->mnt_flag & MNT_ASYNC;
638 mp->mnt_flag &=~ MNT_ASYNC;
641 * If this filesystem isn't aliasing other filesystems,
642 * try to invalidate any remaining namecache entries and
643 * check the count afterwards.
645 if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
646 cache_lock(&mp->mnt_ncmountpt);
647 cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
648 cache_unlock(&mp->mnt_ncmountpt);
650 if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
651 (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
653 if ((flags & MNT_FORCE) == 0) {
654 error = EBUSY;
655 mount_warning(mp, "Cannot unmount: "
656 "%d namecache "
657 "references still "
658 "present",
659 ncp->nc_refs - 1);
660 } else {
661 mount_warning(mp, "Forced unmount: "
662 "%d namecache "
663 "references still "
664 "present",
665 ncp->nc_refs - 1);
666 freeok = 0;
672 * nchandle records ref the mount structure. Expect a count of 1
673 * (our mount->mnt_ncmountpt).
675 if (mp->mnt_refs != 1) {
676 if ((flags & MNT_FORCE) == 0) {
677 mount_warning(mp, "Cannot unmount: "
678 "%d process references still "
679 "present", mp->mnt_refs);
680 error = EBUSY;
681 } else {
682 mount_warning(mp, "Forced unmount: "
683 "%d process references still "
684 "present", mp->mnt_refs);
685 freeok = 0;
689 if (error == 0) {
690 if (mp->mnt_syncer != NULL)
691 vrele(mp->mnt_syncer);
692 if (((mp->mnt_flag & MNT_RDONLY) ||
693 (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
694 (flags & MNT_FORCE)) {
695 error = VFS_UNMOUNT(mp, flags);
698 if (error) {
699 if (mp->mnt_syncer == NULL)
700 vfs_allocate_syncvnode(mp);
701 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
702 mp->mnt_flag |= async_flag;
703 lockmgr(&mp->mnt_lock, LK_RELEASE);
704 if (mp->mnt_kern_flag & MNTK_MWAIT)
705 wakeup(mp);
706 return (error);
709 * Clean up any journals still associated with the mount after
710 * filesystem activity has ceased.
712 journal_remove_all_journals(mp,
713 ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));
715 mountlist_remove(mp);
718 * Remove any installed vnode ops here so the individual VFSs don't
719 * have to.
721 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
722 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
723 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
724 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
725 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
727 if (mp->mnt_ncmountpt.ncp != NULL) {
728 nch = mp->mnt_ncmountpt;
729 cache_zero(&mp->mnt_ncmountpt);
730 cache_clrmountpt(&nch);
731 cache_drop(&nch);
733 if (mp->mnt_ncmounton.ncp != NULL) {
734 nch = mp->mnt_ncmounton;
735 cache_zero(&mp->mnt_ncmounton);
736 cache_clrmountpt(&nch);
737 cache_drop(&nch);
740 mp->mnt_vfc->vfc_refcount--;
741 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
742 panic("unmount: dangling vnode");
743 lockmgr(&mp->mnt_lock, LK_RELEASE);
744 if (mp->mnt_kern_flag & MNTK_MWAIT)
745 wakeup(mp);
746 if (freeok)
747 kfree(mp, M_MOUNT);
748 return (0);
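/*
 * Emit a warning during unmount processing, prefixed with the mount
 * point path when cache_fullpath() can resolve it.
 */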
751 static
752 void
753 mount_warning(struct mount *mp, const char *ctl, ...)
755 char *ptr;
756 char *buf;
757 __va_list va;
759 __va_start(va, ctl);
760 if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf) == 0) {
761 kprintf("unmount(%s): ", ptr);
762 kvprintf(ctl, va);
763 kprintf("\n");
764 kfree(buf, M_TEMP);
765 } else {
766 kprintf("unmount(%p", mp);
767 if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name)
768 kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name);
769 kprintf("): ");
770 kvprintf(ctl, va);
771 kprintf("\n");
773 __va_end(va);
777 * Shim cache_fullpath() to handle the case where a process is chrooted into
778 * a subdirectory of a mount. In this case if the root mount matches the
779 * process root directory's mount we have to specify the process's root
780 * directory instead of the mount point, because the mount point might
781 * be above the root directory.
783 static
785 mount_path(struct proc *p, struct mount *mp, char **rb, char **fb)
787 struct nchandle *nch;
789 if (p && p->p_fd->fd_nrdir.mount == mp)
790 nch = &p->p_fd->fd_nrdir;
791 else
792 nch = &mp->mnt_ncmountpt;
793 return(cache_fullpath(p, nch, rb, fb));
797 * Sync each mounted filesystem.
800 #ifdef DEBUG
801 static int syncprt = 0;
802 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
803 #endif /* DEBUG */
805 static int sync_callback(struct mount *mp, void *data);
807 /* ARGSUSED */
809 sys_sync(struct sync_args *uap)
811 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);
812 #ifdef DEBUG
814 * print out buffer pool stat information on each sync() call.
816 if (syncprt)
817 vfs_bufstats();
818 #endif /* DEBUG */
819 return (0);
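/*
 * Per-mount callback for sys_sync(): flush dirty pages and filesystem
 * data on each read-write mount without waiting for completion.
 */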
822 static
824 sync_callback(struct mount *mp, void *data __unused)
826 int asyncflag;
828 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
829 asyncflag = mp->mnt_flag & MNT_ASYNC;
830 mp->mnt_flag &= ~MNT_ASYNC;
831 vfs_msync(mp, MNT_NOWAIT);
832 VFS_SYNC(mp, MNT_NOWAIT);
833 mp->mnt_flag |= asyncflag;
835 return(0);
838 /* XXX PRISON: could be per prison flag */
839 static int prison_quotas;
840 #if 0
841 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
842 #endif
845 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
847 * Change filesystem quotas.
849 /* ARGSUSED */
851 sys_quotactl(struct quotactl_args *uap)
853 struct nlookupdata nd;
854 struct thread *td;
855 struct proc *p;
856 struct mount *mp;
857 int error;
859 td = curthread;
860 p = td->td_proc;
861 if (p->p_ucred->cr_prison && !prison_quotas)
862 return (EPERM);
864 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
865 if (error == 0)
866 error = nlookup(&nd);
867 if (error == 0) {
868 mp = nd.nl_nch.mount;
869 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
870 uap->arg, nd.nl_cred);
872 nlookup_done(&nd);
873 return (error);
877 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
878 * void *buf, int buflen)
880 * This function operates on a mount point and executes the specified
881 * operation using the specified control data, and possibly returns data.
883 * The actual number of bytes stored in the result buffer is returned, 0
884 * if none, otherwise an error is returned.
886 /* ARGSUSED */
888 sys_mountctl(struct mountctl_args *uap)
890 struct thread *td = curthread;
891 struct proc *p = td->td_proc;
892 struct file *fp;
893 void *ctl = NULL;
894 void *buf = NULL;
895 char *path = NULL;
896 int error;
899 * Sanity and permissions checks. We must be root.
901 KKASSERT(p);
902 if (p->p_ucred->cr_prison != NULL)
903 return (EPERM);
904 if ((error = suser(td)) != 0)
905 return (error);
908 * Argument length checks
910 if (uap->ctllen < 0 || uap->ctllen > 1024)
911 return (EINVAL);
912 if (uap->buflen < 0 || uap->buflen > 16 * 1024)
913 return (EINVAL);
914 if (uap->path == NULL)
915 return (EINVAL);
918 * Allocate the necessary buffers and copyin data
920 path = objcache_get(namei_oc, M_WAITOK);
921 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
922 if (error)
923 goto done;
925 if (uap->ctllen) {
926 ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
927 error = copyin(uap->ctl, ctl, uap->ctllen);
928 if (error)
929 goto done;
931 if (uap->buflen)
932 buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);
935 * Validate the descriptor
937 if (uap->fd >= 0) {
938 fp = holdfp(p->p_fd, uap->fd, -1);
939 if (fp == NULL) {
940 error = EBADF;
941 goto done;
943 } else {
944 fp = NULL;
948 * Execute the internal kernel function and clean up.
950 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result);
951 if (fp)
952 fdrop(fp);
953 if (error == 0 && uap->sysmsg_result > 0)
954 error = copyout(buf, uap->buf, uap->sysmsg_result);
955 done:
956 if (path)
957 objcache_put(namei_oc, path);
958 if (ctl)
959 kfree(ctl, M_TEMP);
960 if (buf)
961 kfree(buf, M_TEMP);
962 return (error);
966 * Execute a mount control operation by resolving the path to a mount point
967 * and calling vop_mountctl().
970 kern_mountctl(const char *path, int op, struct file *fp,
971 const void *ctl, int ctllen,
972 void *buf, int buflen, int *res)
974 struct vnode *vp;
975 struct mount *mp;
976 struct nlookupdata nd;
977 int error;
979 *res = 0;
980 vp = NULL;
981 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
982 if (error == 0)
983 error = nlookup(&nd);
984 if (error == 0)
985 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
986 nlookup_done(&nd);
987 if (error)
988 return (error);
990 mp = vp->v_mount;
993 * Must be the root of the filesystem
995 if ((vp->v_flag & VROOT) == 0) {
996 vput(vp);
997 return (EINVAL);
999 error = vop_mountctl(mp->mnt_vn_use_ops, op, fp, ctl, ctllen,
1000 buf, buflen, res);
1001 vput(vp);
1002 return (error);
1006 kern_statfs(struct nlookupdata *nd, struct statfs *buf)
1008 struct thread *td = curthread;
1009 struct proc *p = td->td_proc;
1010 struct mount *mp;
1011 struct statfs *sp;
1012 char *fullpath, *freepath;
1013 int error;
1015 if ((error = nlookup(nd)) != 0)
1016 return (error);
1017 mp = nd->nl_nch.mount;
1018 sp = &mp->mnt_stat;
1019 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0)
1020 return (error);
1022 error = mount_path(p, mp, &fullpath, &freepath);
1023 if (error)
1024 return(error);
1025 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
1026 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
1027 kfree(freepath, M_TEMP);
1029 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
1030 bcopy(sp, buf, sizeof(*buf));
1031 /* Only root should have access to the fsid's. */
1032 if (suser(td))
1033 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
1034 return (0);
1038 * statfs_args(char *path, struct statfs *buf)
1040 * Get filesystem statistics.
1043 sys_statfs(struct statfs_args *uap)
1045 struct nlookupdata nd;
1046 struct statfs buf;
1047 int error;
1049 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1050 if (error == 0)
1051 error = kern_statfs(&nd, &buf);
1052 nlookup_done(&nd);
1053 if (error == 0)
1054 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
1055 return (error);
1059 kern_fstatfs(int fd, struct statfs *buf)
1061 struct thread *td = curthread;
1062 struct proc *p = td->td_proc;
1063 struct file *fp;
1064 struct mount *mp;
1065 struct statfs *sp;
1066 char *fullpath, *freepath;
1067 int error;
1069 KKASSERT(p);
1070 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
1071 return (error);
1072 mp = ((struct vnode *)fp->f_data)->v_mount;
1073 if (mp == NULL) {
1074 error = EBADF;
1075 goto done;
1077 if (fp->f_cred == NULL) {
1078 error = EINVAL;
1079 goto done;
1081 sp = &mp->mnt_stat;
1082 if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
1083 goto done;
1085 if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0)
1086 goto done;
1087 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
1088 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
1089 kfree(freepath, M_TEMP);
1091 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
1092 bcopy(sp, buf, sizeof(*buf));
1094 /* Only root should have access to the fsid's. */
1095 if (suser(td))
1096 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
1097 error = 0;
1098 done:
1099 fdrop(fp);
1100 return (error);
1104 * fstatfs_args(int fd, struct statfs *buf)
1106 * Get filesystem statistics.
1109 sys_fstatfs(struct fstatfs_args *uap)
1111 struct statfs buf;
1112 int error;
1114 error = kern_fstatfs(uap->fd, &buf);
1116 if (error == 0)
1117 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
1118 return (error);
1122 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
1124 * Get statistics on all filesystems.
1127 struct getfsstat_info {
1128 struct statfs *sfsp;
1129 long count;
1130 long maxcount;
1131 int error;
1132 int flags;
1133 struct proc *p;
1136 static int getfsstat_callback(struct mount *, void *);
1138 /* ARGSUSED */
1140 sys_getfsstat(struct getfsstat_args *uap)
1142 struct thread *td = curthread;
1143 struct proc *p = td->td_proc;
1144 struct getfsstat_info info;
1146 bzero(&info, sizeof(info));
1148 info.maxcount = uap->bufsize / sizeof(struct statfs);
1149 info.sfsp = uap->buf;
1150 info.count = 0;
1151 info.flags = uap->flags;
1152 info.p = p;
1154 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
1155 if (info.sfsp && info.count > info.maxcount)
1156 uap->sysmsg_result = info.maxcount;
1157 else
1158 uap->sysmsg_result = info.count;
1159 return (info.error);
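/*
 * Per-mount callback for getfsstat(): copy one statfs record out to the
 * user buffer, skipping mounts that are not visible from the calling
 * process's chroot.
 */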
1162 static int
1163 getfsstat_callback(struct mount *mp, void *data)
1165 struct getfsstat_info *info = data;
1166 struct statfs *sp;
1167 char *freepath;
1168 char *fullpath;
1169 int error;
1171 if (info->sfsp && info->count < info->maxcount) {
1172 if (info->p && !chroot_visible_mnt(mp, info->p))
1173 return(0);
1174 sp = &mp->mnt_stat;
1177 * If MNT_NOWAIT or MNT_LAZY is specified, do not
1178 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
1179 * overrides MNT_WAIT.
1181 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
1182 (info->flags & MNT_WAIT)) &&
1183 (error = VFS_STATFS(mp, sp, info->p->p_ucred))) {
1184 return(0);
1186 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
1188 error = mount_path(info->p, mp, &fullpath, &freepath);
1189 if (error) {
1190 info->error = error;
1191 return(-1);
1193 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
1194 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
1195 kfree(freepath, M_TEMP);
1197 error = copyout(sp, info->sfsp, sizeof(*sp));
1198 if (error) {
1199 info->error = error;
1200 return (-1);
1202 ++info->sfsp;
1204 info->count++;
1205 return(0);
1209 * fchdir_args(int fd)
1211 * Change current working directory to a given file descriptor.
1213 /* ARGSUSED */
1215 sys_fchdir(struct fchdir_args *uap)
1217 struct thread *td = curthread;
1218 struct proc *p = td->td_proc;
1219 struct filedesc *fdp = p->p_fd;
1220 struct vnode *vp, *ovp;
1221 struct mount *mp;
1222 struct file *fp;
1223 struct nchandle nch, onch, tnch;
1224 int error;
1226 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0)
1227 return (error);
1228 vp = (struct vnode *)fp->f_data;
1229 vref(vp);
1230 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1231 if (vp->v_type != VDIR || fp->f_nchandle.ncp == NULL)
1232 error = ENOTDIR;
1233 else
1234 error = VOP_ACCESS(vp, VEXEC, p->p_ucred);
1235 if (error) {
1236 vput(vp);
1237 fdrop(fp);
1238 return (error);
1240 cache_copy(&fp->f_nchandle, &nch);
1243 * If the ncp has become a mount point, traverse through
1244 * the mount point.
1247 while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) &&
1248 (mp = cache_findmount(&nch)) != NULL
1250 error = nlookup_mp(mp, &tnch);
1251 if (error == 0) {
1252 cache_unlock(&tnch); /* leave ref intact */
1253 vput(vp);
1254 vp = tnch.ncp->nc_vp;
1255 error = vget(vp, LK_SHARED);
1256 KKASSERT(error == 0);
1257 cache_drop(&nch);
1258 nch = tnch;
1261 if (error == 0) {
1262 ovp = fdp->fd_cdir;
1263 onch = fdp->fd_ncdir;
1264 vn_unlock(vp); /* leave ref intact */
1265 fdp->fd_cdir = vp;
1266 fdp->fd_ncdir = nch;
1267 cache_drop(&onch);
1268 vrele(ovp);
1269 } else {
1270 cache_drop(&nch);
1271 vput(vp);
1273 fdrop(fp);
1274 return (error);
1278 kern_chdir(struct nlookupdata *nd)
1280 struct thread *td = curthread;
1281 struct proc *p = td->td_proc;
1282 struct filedesc *fdp = p->p_fd;
1283 struct vnode *vp, *ovp;
1284 struct nchandle onch;
1285 int error;
1287 if ((error = nlookup(nd)) != 0)
1288 return (error);
1289 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
1290 return (ENOENT);
1291 if ((error = vget(vp, LK_SHARED)) != 0)
1292 return (error);
1294 error = checkvp_chdir(vp, td);
1295 vn_unlock(vp);
1296 if (error == 0) {
1297 ovp = fdp->fd_cdir;
1298 onch = fdp->fd_ncdir;
1299 cache_unlock(&nd->nl_nch); /* leave reference intact */
1300 fdp->fd_ncdir = nd->nl_nch;
1301 fdp->fd_cdir = vp;
1302 cache_drop(&onch);
1303 vrele(ovp);
1304 cache_zero(&nd->nl_nch);
1305 } else {
1306 vrele(vp);
1308 return (error);
1312 * chdir_args(char *path)
1314 * Change current working directory (``.'').
1317 sys_chdir(struct chdir_args *uap)
1319 struct nlookupdata nd;
1320 int error;
1322 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1323 if (error == 0)
1324 error = kern_chdir(&nd);
1325 nlookup_done(&nd);
1326 return (error);
1330 * Helper function for raised chroot(2) security: refuse if
1331 * any file descriptors are open directories.
1333 static int
1334 chroot_refuse_vdir_fds(struct filedesc *fdp)
1336 struct vnode *vp;
1337 struct file *fp;
1338 int error;
1339 int fd;
1341 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
1342 if ((error = holdvnode(fdp, fd, &fp)) != 0)
1343 continue;
1344 vp = (struct vnode *)fp->f_data;
1345 if (vp->v_type != VDIR) {
1346 fdrop(fp);
1347 continue;
1349 fdrop(fp);
1350 return(EPERM);
1352 return (0);
1356 * This sysctl determines if we will allow a process to chroot(2) if it
1357 * has a directory open:
1358 * 0: disallowed for all processes.
1359 * 1: allowed for processes that were not already chroot(2)'ed.
1360 * 2: allowed for all processes.
1363 static int chroot_allow_open_directories = 1;
1365 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
1366 &chroot_allow_open_directories, 0, "");
1369 * chroot to the specified namecache entry. We obtain the vp from the
1370 * namecache data. The passed ncp must be locked and referenced and will
1371 * remain locked and referenced on return.
1374 kern_chroot(struct nchandle *nch)
1376 struct thread *td = curthread;
1377 struct proc *p = td->td_proc;
1378 struct filedesc *fdp = p->p_fd;
1379 struct vnode *vp;
1380 int error;
1383 * Only root can chroot
1385 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)
1386 return (error);
1389 * Disallow open directory descriptors (fchdir() breakouts).
1391 if (chroot_allow_open_directories == 0 ||
1392 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
1393 if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
1394 return (error);
1396 if ((vp = nch->ncp->nc_vp) == NULL)
1397 return (ENOENT);
1399 if ((error = vget(vp, LK_SHARED)) != 0)
1400 return (error);
1403 * Check the validity of vp as a directory to change to and
1404 * associate it with rdir/jdir.
1406 error = checkvp_chdir(vp, td);
1407 vn_unlock(vp); /* leave reference intact */
1408 if (error == 0) {
1409 vrele(fdp->fd_rdir);
1410 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */
1411 cache_drop(&fdp->fd_nrdir);
1412 cache_copy(nch, &fdp->fd_nrdir);
1413 if (fdp->fd_jdir == NULL) {
1414 fdp->fd_jdir = vp;
1415 vref(fdp->fd_jdir);
1416 cache_copy(nch, &fdp->fd_njdir);
1418 } else {
1419 vrele(vp);
1421 return (error);
1425 * chroot_args(char *path)
1427 * Change notion of root (``/'') directory.
1429 /* ARGSUSED */
1431 sys_chroot(struct chroot_args *uap)
1433 struct thread *td = curthread;
1434 struct nlookupdata nd;
1435 int error;
1437 KKASSERT(td->td_proc);
1438 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1439 if (error) {
1440 nlookup_done(&nd);
1441 return(error);
1443 error = nlookup(&nd);
1444 if (error == 0)
1445 error = kern_chroot(&nd.nl_nch);
1446 nlookup_done(&nd);
1447 return(error);
1451 * Common routine for chroot and chdir. Given a locked, referenced vnode,
1452 * determine whether it is legal to chdir to the vnode. The vnode's state
1453 * is not changed by this call.
1456 checkvp_chdir(struct vnode *vp, struct thread *td)
1458 int error;
1460 if (vp->v_type != VDIR)
1461 error = ENOTDIR;
1462 else
1463 error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred);
1464 return (error);
1468 kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
1470 struct thread *td = curthread;
1471 struct proc *p = td->td_proc;
1472 struct lwp *lp = td->td_lwp;
1473 struct filedesc *fdp = p->p_fd;
1474 int cmode, flags;
1475 struct file *nfp;
1476 struct file *fp;
1477 struct vnode *vp;
1478 int type, indx, error;
1479 struct flock lf;
1481 if ((oflags & O_ACCMODE) == O_ACCMODE)
1482 return (EINVAL);
1483 flags = FFLAGS(oflags);
1484 error = falloc(p, &nfp, NULL);
1485 if (error)
1486 return (error);
1487 fp = nfp;
1488 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
1491 * XXX p_dupfd is a real mess. It allows a device to return a
1492 * file descriptor to be duplicated rather than doing the open
1493 * itself.
1495 lp->lwp_dupfd = -1;
1498 * Call vn_open() to do the lookup and assign the vnode to the
1499 * file pointer. vn_open() does not change the ref count on fp
1500 * and the vnode, on success, will be inherited by the file pointer
1501 * and unlocked.
1503 nd->nl_flags |= NLC_LOCKVP;
1504 error = vn_open(nd, fp, flags, cmode);
1505 nlookup_done(nd);
1506 if (error) {
1508 * handle special fdopen() case. bleh. dupfdopen() is
1509 * responsible for dropping the old contents of ofiles[indx]
1510 * if it succeeds.
1512 * Note that fsetfd() will add a ref to fp which represents
1513 * the fd_files[] assignment. We must still drop our
1514 * reference.
1516 if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
1517 if (fdalloc(p, 0, &indx) == 0) {
1518 error = dupfdopen(p, indx, lp->lwp_dupfd, flags, error);
1519 if (error == 0) {
1520 *res = indx;
1521 fdrop(fp); /* our ref */
1522 return (0);
1524 fsetfd(p, NULL, indx);
1527 fdrop(fp); /* our ref */
1528 if (error == ERESTART)
1529 error = EINTR;
1530 return (error);
1534 * ref the vnode for ourselves so it can't be ripped out from under
1535 * us. XXX need an ND flag to request that the vnode be returned
1536 * anyway.
1538 * Reserve a file descriptor but do not assign it until the open
1539 * succeeds.
1541 vp = (struct vnode *)fp->f_data;
1542 vref(vp);
1543 if ((error = fdalloc(p, 0, &indx)) != 0) {
1544 fdrop(fp);
1545 vrele(vp);
1546 return (error);
1550 * If no error occurs the vp will have been assigned to the file
1551 * pointer.
1553 lp->lwp_dupfd = 0;
1555 if (flags & (O_EXLOCK | O_SHLOCK)) {
1556 lf.l_whence = SEEK_SET;
1557 lf.l_start = 0;
1558 lf.l_len = 0;
1559 if (flags & O_EXLOCK)
1560 lf.l_type = F_WRLCK;
1561 else
1562 lf.l_type = F_RDLCK;
1563 if (flags & FNONBLOCK)
1564 type = 0;
1565 else
1566 type = F_WAIT;
1568 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1570 * lock request failed. Clean up the reserved
1571 * descriptor.
1573 vrele(vp);
1574 fsetfd(p, NULL, indx);
1575 fdrop(fp);
1576 return (error);
1578 fp->f_flag |= FHASLOCK;
1580 #if 0
1582 * Assert that all regular file vnodes were created with an object.
1584 KASSERT(vp->v_type != VREG || vp->v_object != NULL,
1585 ("open: regular file has no backing object after vn_open"));
1586 #endif
1588 vrele(vp);
1591 * release our private reference, leaving the one associated with the
1592 * descriptor table intact.
1594 fsetfd(p, fp, indx);
1595 fdrop(fp);
1596 *res = indx;
1597 return (0);
1601 * open_args(char *path, int flags, int mode)
1603 * Check permissions, allocate an open file structure,
1604 * and call the device open routine if any.
1607 sys_open(struct open_args *uap)
1609 struct nlookupdata nd;
1610 int error;
1612 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1613 if (error == 0) {
1614 error = kern_open(&nd, uap->flags,
1615 uap->mode, &uap->sysmsg_result);
1617 nlookup_done(&nd);
1618 return (error);
1622 kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor)
1624 struct thread *td = curthread;
1625 struct proc *p = td->td_proc;
1626 struct vnode *vp;
1627 struct vnode *dvp;
1628 struct vattr vattr;
1629 int error;
1630 int whiteout = 0;
1632 KKASSERT(p);
1634 switch (mode & S_IFMT) {
1635 case S_IFCHR:
1636 case S_IFBLK:
1637 error = suser(td);
1638 break;
1639 default:
1640 error = suser_cred(p->p_ucred, PRISON_ROOT);
1641 break;
1643 if (error)
1644 return (error);
1646 bwillwrite();
1647 nd->nl_flags |= NLC_CREATE;
1648 if ((error = nlookup(nd)) != 0)
1649 return (error);
1650 if (nd->nl_nch.ncp->nc_vp)
1651 return (EEXIST);
1652 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1653 return (error);
1654 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
1655 return (EPERM);
1656 /* vhold(dvp); - DVP can't go away */
1658 VATTR_NULL(&vattr);
1659 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1660 vattr.va_rmajor = rmajor;
1661 vattr.va_rminor = rminor;
1662 whiteout = 0;
1664 switch (mode & S_IFMT) {
1665 case S_IFMT: /* used by badsect to flag bad sectors */
1666 vattr.va_type = VBAD;
1667 break;
1668 case S_IFCHR:
1669 vattr.va_type = VCHR;
1670 break;
1671 case S_IFBLK:
1672 vattr.va_type = VBLK;
1673 break;
1674 case S_IFWHT:
1675 whiteout = 1;
1676 break;
1677 default:
1678 error = EINVAL;
1679 break;
1681 if (error == 0) {
1682 if (whiteout) {
1683 error = VOP_NWHITEOUT(&nd->nl_nch, dvp, nd->nl_cred, NAMEI_CREATE);
1684 } else {
1685 vp = NULL;
1686 error = VOP_NMKNOD(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr);
1687 if (error == 0)
1688 vput(vp);
1691 /* vdrop(dvp); */
1692 return (error);
1696 * mknod_args(char *path, int mode, int dev)
1698 * Create a special file.
1701 sys_mknod(struct mknod_args *uap)
1703 struct nlookupdata nd;
1704 int error;
1706 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1707 if (error == 0) {
1708 error = kern_mknod(&nd, uap->mode,
1709 umajor(uap->dev), uminor(uap->dev));
1711 nlookup_done(&nd);
1712 return (error);
1716 kern_mkfifo(struct nlookupdata *nd, int mode)
1718 struct thread *td = curthread;
1719 struct proc *p = td->td_proc;
1720 struct vattr vattr;
1721 struct vnode *vp;
1722 struct vnode *dvp;
1723 int error;
1725 bwillwrite();
1727 nd->nl_flags |= NLC_CREATE;
1728 if ((error = nlookup(nd)) != 0)
1729 return (error);
1730 if (nd->nl_nch.ncp->nc_vp)
1731 return (EEXIST);
1732 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1733 return (error);
1734 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
1735 return (EPERM);
1736 /* vhold(dvp); - DVP can't go away */
1738 VATTR_NULL(&vattr);
1739 vattr.va_type = VFIFO;
1740 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1741 vp = NULL;
1742 error = VOP_NMKNOD(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr);
1743 /* vdrop(dvp); */
1744 if (error == 0)
1745 vput(vp);
1746 return (error);
1750 * mkfifo_args(char *path, int mode)
1752 * Create a named pipe.
1755 sys_mkfifo(struct mkfifo_args *uap)
1757 struct nlookupdata nd;
1758 int error;
1760 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1761 if (error == 0)
1762 error = kern_mkfifo(&nd, uap->mode);
1763 nlookup_done(&nd);
1764 return (error);
1767 static int hardlink_check_uid = 0;
1768 SYSCTL_INT(_kern, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
1769 &hardlink_check_uid, 0,
1770 "Unprivileged processes cannot create hard links to files owned by other "
1771 "users");
1772 static int hardlink_check_gid = 0;
1773 SYSCTL_INT(_kern, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
1774 &hardlink_check_gid, 0,
1775 "Unprivileged processes cannot create hard links to files owned by other "
1776 "groups");
1778 static int
1779 can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred)
1781 struct vattr va;
1782 int error;
1785 * Shortcut if disabled
1787 if (hardlink_check_uid == 0 && hardlink_check_gid == 0)
1788 return (0);
1791 * root cred can always hardlink
1793 if (suser_cred(cred, PRISON_ROOT) == 0)
1794 return (0);
1797 * Otherwise only if the originating file is owned by the
1798 * same user or group. Note that any group is allowed if
1799 * the file is owned by the caller.
1801 error = VOP_GETATTR(vp, &va);
1802 if (error != 0)
1803 return (error);
1805 if (hardlink_check_uid) {
1806 if (cred->cr_uid != va.va_uid)
1807 return (EPERM);
1810 if (hardlink_check_gid) {
1811 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred))
1812 return (EPERM);
1815 return (0);
1819 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
1821 struct thread *td = curthread;
1822 struct vnode *vp;
1823 struct vnode *dvp;
1824 int error;
1827 * Lookup the source and obtain a locked vnode.
1829 * XXX relookup on vget failure / race ?
1831 bwillwrite();
1832 if ((error = nlookup(nd)) != 0)
1833 return (error);
1834 vp = nd->nl_nch.ncp->nc_vp;
1835 KKASSERT(vp != NULL);
1836 if (vp->v_type == VDIR)
1837 return (EPERM); /* POSIX */
1838 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1839 return (error);
1840 if ((error = vget(vp, LK_EXCLUSIVE)) != 0)
1841 return (error);
1844 * Unlock the source so we can lookup the target without deadlocking
1845 * (XXX vp is locked already, possible other deadlock?). The target
1846 * must not exist.
1848 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
1849 nd->nl_flags &= ~NLC_NCPISLOCKED;
1850 cache_unlock(&nd->nl_nch);
1852 linknd->nl_flags |= NLC_CREATE;
1853 if ((error = nlookup(linknd)) != 0) {
1854 vput(vp);
1855 return (error);
1857 if (linknd->nl_nch.ncp->nc_vp) {
1858 vput(vp);
1859 return (EEXIST);
1861 if ((dvp = linknd->nl_nch.ncp->nc_parent->nc_vp) == NULL) {
1862 vput(vp);
1863 return (EPERM);
1865 /* vhold(dvp); - dvp can't go away */
1868 * Finally run the new API VOP.
1870 error = can_hardlink(vp, td, td->td_proc->p_ucred);
1871 if (error == 0)
1872 error = VOP_NLINK(&linknd->nl_nch, dvp, vp, linknd->nl_cred);
1873 /* vdrop(dvp); */
1874 vput(vp);
1875 return (error);
1879 * link_args(char *path, char *link)
1881 * Make a hard file link.
1884 sys_link(struct link_args *uap)
1886 struct nlookupdata nd, linknd;
1887 int error;
1889 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1890 if (error == 0) {
1891 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
1892 if (error == 0)
1893 error = kern_link(&nd, &linknd);
1894 nlookup_done(&linknd);
1896 nlookup_done(&nd);
1897 return (error);
1901 kern_symlink(struct nlookupdata *nd, char *path, int mode)
1903 struct vattr vattr;
1904 struct vnode *vp;
1905 struct vnode *dvp;
1906 int error;
1908 bwillwrite();
1909 nd->nl_flags |= NLC_CREATE;
1910 if ((error = nlookup(nd)) != 0)
1911 return (error);
1912 if (nd->nl_nch.ncp->nc_vp)
1913 return (EEXIST);
1914 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1915 return (error);
1916 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
1917 return (EPERM);
1918 /* vhold(dvp); - dvp can't go away */
1919 VATTR_NULL(&vattr);
1920 vattr.va_mode = mode;
1921 error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path);
1922 /* vdrop(dvp); */
1923 if (error == 0)
1924 vput(vp);
1925 return (error);
1929 * symlink(char *path, char *link)
1931 * Make a symbolic link.
1934 sys_symlink(struct symlink_args *uap)
1936 struct thread *td = curthread;
1937 struct nlookupdata nd;
1938 char *path;
1939 int error;
1940 int mode;
1942 path = objcache_get(namei_oc, M_WAITOK);
1943 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1944 if (error == 0) {
1945 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
1946 if (error == 0) {
1947 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
1948 error = kern_symlink(&nd, path, mode);
1950 nlookup_done(&nd);
1952 objcache_put(namei_oc, path);
1953 return (error);
1957 * undelete_args(char *path)
1959 * Delete a whiteout from the filesystem.
1961 /* ARGSUSED */
1963 sys_undelete(struct undelete_args *uap)
1965 struct nlookupdata nd;
1966 struct vnode *dvp;
1967 int error;
1969 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1970 bwillwrite();
1971 nd.nl_flags |= NLC_DELETE;
1972 if (error == 0)
1973 error = nlookup(&nd);
1974 if (error == 0)
1975 error = ncp_writechk(&nd.nl_nch);
1976 dvp = NULL;
1977 if (error == 0) {
1978 if ((dvp = nd.nl_nch.ncp->nc_parent->nc_vp) == NULL)
1979 error = EPERM;
1981 if (error == 0) {
1982 /* vhold(dvp); - dvp can't go away */
1983 error = VOP_NWHITEOUT(&nd.nl_nch, dvp, nd.nl_cred, NAMEI_DELETE);
1984 /* vdrop(dvp); */
1986 nlookup_done(&nd);
1987 return (error);
1991 kern_unlink(struct nlookupdata *nd)
1993 struct vnode *dvp;
1994 int error;
1996 bwillwrite();
1997 nd->nl_flags |= NLC_DELETE;
1998 if ((error = nlookup(nd)) != 0)
1999 return (error);
2000 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
2001 return (error);
2002 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
2003 return (EPERM);
2004 /* vhold(dvp); - dvp can't go away */
2005 error = VOP_NREMOVE(&nd->nl_nch, dvp, nd->nl_cred);
2006 /* vdrop(dvp); */
2007 return (error);
2011 * unlink_args(char *path)
2013 * Delete a name from the filesystem.
2016 sys_unlink(struct unlink_args *uap)
2018 struct nlookupdata nd;
2019 int error;
2021 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2022 if (error == 0)
2023 error = kern_unlink(&nd);
2024 nlookup_done(&nd);
2025 return (error);
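/*
 * Common lseek code: adjust the file's offset relative to the start,
 * the current position, or the end of the file and return the new
 * offset.
 */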
2029 kern_lseek(int fd, off_t offset, int whence, off_t *res)
2031 struct thread *td = curthread;
2032 struct proc *p = td->td_proc;
2033 struct file *fp;
2034 struct vattr vattr;
2035 int error;
2037 fp = holdfp(p->p_fd, fd, -1);
2038 if (fp == NULL)
2039 return (EBADF);
2040 if (fp->f_type != DTYPE_VNODE) {
2041 error = ESPIPE;
2042 goto done;
2045 switch (whence) {
2046 case L_INCR:
2047 fp->f_offset += offset;
2048 error = 0;
2049 break;
2050 case L_XTND:
2051 error = VOP_GETATTR((struct vnode *)fp->f_data, &vattr);
2052 if (error == 0)
2053 fp->f_offset = offset + vattr.va_size;
2054 break;
2055 case L_SET:
2056 fp->f_offset = offset;
2057 error = 0;
2058 break;
2059 default:
2060 error = EINVAL;
2061 break;
2063 *res = fp->f_offset;
2064 done:
2065 fdrop(fp);
2066 return (error);
2070 * lseek_args(int fd, int pad, off_t offset, int whence)
2072 * Reposition read/write file offset.
2075 sys_lseek(struct lseek_args *uap)
2077 int error;
2079 error = kern_lseek(uap->fd, uap->offset, uap->whence,
2080 &uap->sysmsg_offset);
2082 return (error);
2086 kern_access(struct nlookupdata *nd, int aflags)
2088 struct vnode *vp;
2089 int error, flags;
2091 if ((error = nlookup(nd)) != 0)
2092 return (error);
2093 retry:
2094 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);
2095 if (error)
2096 return (error);
2098 /* Flags == 0 means only check for existence. */
2099 if (aflags) {
2100 flags = 0;
2101 if (aflags & R_OK)
2102 flags |= VREAD;
2103 if (aflags & W_OK)
2104 flags |= VWRITE;
2105 if (aflags & X_OK)
2106 flags |= VEXEC;
2107 if ((flags & VWRITE) == 0 ||
2108 (error = vn_writechk(vp, &nd->nl_nch)) == 0)
2109 error = VOP_ACCESS(vp, flags, nd->nl_cred);
2112 * If the file handle is stale we have to re-resolve the
2113 * entry. This is a hack at the moment.
2115 if (error == ESTALE) {
2116 cache_setunresolved(&nd->nl_nch);
2117 error = cache_resolve(&nd->nl_nch, nd->nl_cred);
2118 if (error == 0) {
2119 vput(vp);
2120 vp = NULL;
2121 goto retry;
2125 vput(vp);
2126 return (error);
2130 * access_args(char *path, int flags)
2132 * Check access permissions.
2135 sys_access(struct access_args *uap)
2137 struct nlookupdata nd;
2138 int error;
2140 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2141 if (error == 0)
2142 error = kern_access(&nd, uap->flags);
2143 nlookup_done(&nd);
2144 return (error);
2148 kern_stat(struct nlookupdata *nd, struct stat *st)
2150 int error;
2151 struct vnode *vp;
2152 thread_t td;
2154 if ((error = nlookup(nd)) != 0)
2155 return (error);
2156 again:
2157 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
2158 return (ENOENT);
2160 td = curthread;
2161 if ((error = vget(vp, LK_SHARED)) != 0)
2162 return (error);
2163 error = vn_stat(vp, st, nd->nl_cred);
2166 * If the file handle is stale we have to re-resolve the entry. This
2167 * is a hack at the moment.
2169 if (error == ESTALE) {
2170 vput(vp);
2171 cache_setunresolved(&nd->nl_nch);
2172 error = cache_resolve(&nd->nl_nch, nd->nl_cred);
2173 if (error == 0)
2174 goto again;
2175 } else {
2176 vput(vp);
2178 return (error);
2182 * stat_args(char *path, struct stat *ub)
2184 * Get file status; this version follows links.
2187 sys_stat(struct stat_args *uap)
2189 struct nlookupdata nd;
2190 struct stat st;
2191 int error;
2193 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2194 if (error == 0) {
2195 error = kern_stat(&nd, &st);
2196 if (error == 0)
2197 error = copyout(&st, uap->ub, sizeof(*uap->ub));
2199 nlookup_done(&nd);
2200 return (error);
2204 * lstat_args(char *path, struct stat *ub)
2206 * Get file status; this version does not follow links.
2209 sys_lstat(struct lstat_args *uap)
2211 struct nlookupdata nd;
2212 struct stat st;
2213 int error;
2215 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2216 if (error == 0) {
2217 error = kern_stat(&nd, &st);
2218 if (error == 0)
2219 error = copyout(&st, uap->ub, sizeof(*uap->ub));
2221 nlookup_done(&nd);
2222 return (error);
2226 * pathconf_args(char *path, int name)
2228 * Get configurable pathname variables.
2230 /* ARGSUSED */
2232 sys_pathconf(struct pathconf_args *uap)
2234 struct nlookupdata nd;
2235 struct vnode *vp;
2236 int error;
2238 vp = NULL;
2239 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2240 if (error == 0)
2241 error = nlookup(&nd);
2242 if (error == 0)
2243 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
2244 nlookup_done(&nd);
2245 if (error == 0) {
2246 error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
2247 vput(vp);
2249 return (error);
2253 * XXX: daver
2254 * kern_readlink isn't properly split yet. There is a copyin buried
2255 * in VOP_READLINK().
2258 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
2260 struct thread *td = curthread;
2261 struct proc *p = td->td_proc;
2262 struct vnode *vp;
2263 struct iovec aiov;
2264 struct uio auio;
2265 int error;
2267 if ((error = nlookup(nd)) != 0)
2268 return (error);
2269 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);
2270 if (error)
2271 return (error);
2272 if (vp->v_type != VLNK) {
2273 error = EINVAL;
2274 } else {
2275 aiov.iov_base = buf;
2276 aiov.iov_len = count;
2277 auio.uio_iov = &aiov;
2278 auio.uio_iovcnt = 1;
2279 auio.uio_offset = 0;
2280 auio.uio_rw = UIO_READ;
2281 auio.uio_segflg = UIO_USERSPACE;
2282 auio.uio_td = td;
2283 auio.uio_resid = count;
2284 error = VOP_READLINK(vp, &auio, p->p_ucred);
2286 vput(vp);
2287 *res = count - auio.uio_resid;
2288 return (error);
2292 * readlink_args(char *path, char *buf, int count)
2294 * Return target name of a symbolic link.
2297 sys_readlink(struct readlink_args *uap)
2299 struct nlookupdata nd;
2300 int error;
2302 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2303 if (error == 0) {
2304 error = kern_readlink(&nd, uap->buf, uap->count,
2305 &uap->sysmsg_result);
2307 nlookup_done(&nd);
2308 return (error);
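/*
 * Common code to change the flags on a vnode via VOP_SETATTR().
 */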
2311 static int
2312 setfflags(struct vnode *vp, int flags)
2314 struct thread *td = curthread;
2315 struct proc *p = td->td_proc;
2316 int error;
2317 struct vattr vattr;
2320 * Prevent non-root users from setting flags on devices. When
2321 * a device is reused, users can retain ownership of the device
2322 * if they are allowed to set flags and programs assume that
2323 * chown can't fail when done as root.
2325 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
2326 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
2327 return (error);
2330 * note: vget is required for any operation that might mod the vnode
2331 * so VINACTIVE is properly cleared.
2333 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2334 VATTR_NULL(&vattr);
2335 vattr.va_flags = flags;
2336 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2337 vput(vp);
2339 return (error);
2343 * chflags(char *path, int flags)
2345 * Change flags of a file given a path name.
2347 /* ARGSUSED */
2349 sys_chflags(struct chflags_args *uap)
2351 struct nlookupdata nd;
2352 struct vnode *vp;
2353 int error;
2355 vp = NULL;
2356 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2357 /* XXX Add NLC flag indicating modifying operation? */
2358 if (error == 0)
2359 error = nlookup(&nd);
2360 if (error == 0)
2361 error = ncp_writechk(&nd.nl_nch);
2362 if (error == 0)
2363 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
2364 nlookup_done(&nd);
2365 if (error == 0) {
2366 error = setfflags(vp, uap->flags);
2367 vrele(vp);
2369 return (error);
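/*
 * Illustrative userland sketch (editorial addition): because setfflags()
 * above stores va_flags wholesale, a caller that only wants to add one
 * flag should fetch the current flags (st_flags) first and OR in the new
 * bit.  The path and the UF_NODUMP flag are examples only.
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	struct stat st;
 *	if (stat("/var/db/somefile", &st) == 0)
 *		chflags("/var/db/somefile", st.st_flags | UF_NODUMP);
 */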
2373 * fchflags_args(int fd, int flags)
2375 * Change flags of a file given a file descriptor.
2377 /* ARGSUSED */
2379 sys_fchflags(struct fchflags_args *uap)
2381 struct thread *td = curthread;
2382 struct proc *p = td->td_proc;
2383 struct file *fp;
2384 int error;
2386 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2387 return (error);
2388 if (fp->f_nchandle.ncp)
2389 error = ncp_writechk(&fp->f_nchandle);
2390 if (error == 0)
2391 error = setfflags((struct vnode *) fp->f_data, uap->flags);
2392 fdrop(fp);
2393 return (error);
2396 static int
2397 setfmode(struct vnode *vp, int mode)
2399 struct thread *td = curthread;
2400 struct proc *p = td->td_proc;
2401 int error;
2402 struct vattr vattr;
2405 * note: vget is required for any operation that might mod the vnode
2406 * so VINACTIVE is properly cleared.
2408 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2409 VATTR_NULL(&vattr);
2410 vattr.va_mode = mode & ALLPERMS;
2411 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2412 vput(vp);
2414 return error;
2418 kern_chmod(struct nlookupdata *nd, int mode)
2420 struct vnode *vp;
2421 int error;
2423 /* XXX Add NLC flag indicating modifying operation? */
2424 if ((error = nlookup(nd)) != 0)
2425 return (error);
2426 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2427 return (error);
2428 if ((error = ncp_writechk(&nd->nl_nch)) == 0)
2429 error = setfmode(vp, mode);
2430 vrele(vp);
2431 return (error);
2435 * chmod_args(char *path, int mode)
2437 * Change mode of a file given path name.
2439 /* ARGSUSED */
2441 sys_chmod(struct chmod_args *uap)
2443 struct nlookupdata nd;
2444 int error;
2446 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2447 if (error == 0)
2448 error = kern_chmod(&nd, uap->mode);
2449 nlookup_done(&nd);
2450 return (error);
2454 * lchmod_args(char *path, int mode)
2456 * Change mode of a file given path name (don't follow links).
2458 /* ARGSUSED */
2460 sys_lchmod(struct lchmod_args *uap)
2462 struct nlookupdata nd;
2463 int error;
2465 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2466 if (error == 0)
2467 error = kern_chmod(&nd, uap->mode);
2468 nlookup_done(&nd);
2469 return (error);
2473 * fchmod_args(int fd, int mode)
2475 * Change mode of a file given a file descriptor.
2477 /* ARGSUSED */
2479 sys_fchmod(struct fchmod_args *uap)
2481 struct thread *td = curthread;
2482 struct proc *p = td->td_proc;
2483 struct file *fp;
2484 int error;
2486 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2487 return (error);
2488 if (fp->f_nchandle.ncp)
2489 error = ncp_writechk(&fp->f_nchandle);
2490 if (error == 0)
2491 error = setfmode((struct vnode *)fp->f_data, uap->mode);
2492 fdrop(fp);
2493 return (error);
2496 static int
2497 setfown(struct vnode *vp, uid_t uid, gid_t gid)
2499 struct thread *td = curthread;
2500 struct proc *p = td->td_proc;
2501 int error;
2502 struct vattr vattr;
2505 * note: vget is required for any operation that might mod the vnode
2506 * so VINACTIVE is properly cleared.
2508 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2509 VATTR_NULL(&vattr);
2510 vattr.va_uid = uid;
2511 vattr.va_gid = gid;
2512 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2513 vput(vp);
2515 return error;
2519 kern_chown(struct nlookupdata *nd, int uid, int gid)
2521 struct vnode *vp;
2522 int error;
2524 /* XXX Add NLC flag indicating modifying operation? */
2525 if ((error = nlookup(nd)) != 0)
2526 return (error);
2527 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2528 return (error);
2529 if ((error = ncp_writechk(&nd->nl_nch)) == 0)
2530 error = setfown(vp, uid, gid);
2531 vrele(vp);
2532 return (error);
2536 * chown(char *path, int uid, int gid)
2538 * Set ownership given a path name.
2541 sys_chown(struct chown_args *uap)
2543 struct nlookupdata nd;
2544 int error;
2546 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2547 if (error == 0)
2548 error = kern_chown(&nd, uap->uid, uap->gid);
2549 nlookup_done(&nd);
2550 return (error);
2554 * lchown_args(char *path, int uid, int gid)
2556 * Set ownership given a path name; do not cross symlinks.
2559 sys_lchown(struct lchown_args *uap)
2561 struct nlookupdata nd;
2562 int error;
2564 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2565 if (error == 0)
2566 error = kern_chown(&nd, uap->uid, uap->gid);
2567 nlookup_done(&nd);
2568 return (error);
2572 * fchown_args(int fd, int uid, int gid)
2574 * Set ownership given a file descriptor.
2576 /* ARGSUSED */
2578 sys_fchown(struct fchown_args *uap)
2580 struct thread *td = curthread;
2581 struct proc *p = td->td_proc;
2582 struct file *fp;
2583 int error;
2585 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2586 return (error);
2587 if (fp->f_nchandle.ncp)
2588 error = ncp_writechk(&fp->f_nchandle);
2589 if (error == 0)
2590 error = setfown((struct vnode *)fp->f_data, uap->uid, uap->gid);
2591 fdrop(fp);
2592 return (error);
2595 static int
2596 getutimes(const struct timeval *tvp, struct timespec *tsp)
2598 struct timeval tv[2];
2600 if (tvp == NULL) {
2601 microtime(&tv[0]);
2602 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2603 tsp[1] = tsp[0];
2604 } else {
2605 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2606 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
2608 return 0;
2611 static int
2612 setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2614 struct thread *td = curthread;
2615 struct proc *p = td->td_proc;
2616 int error;
2617 struct vattr vattr;
2620 * note: vget is required for any operation that might mod the vnode
2621 * so VINACTIVE is properly cleared.
2623 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2624 VATTR_NULL(&vattr);
2625 vattr.va_atime = ts[0];
2626 vattr.va_mtime = ts[1];
2627 if (nullflag)
2628 vattr.va_vaflags |= VA_UTIMES_NULL;
2629 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2630 vput(vp);
2632 return error;
2636 kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
2638 struct timespec ts[2];
2639 struct vnode *vp;
2640 int error;
2642 if ((error = getutimes(tptr, ts)) != 0)
2643 return (error);
2644 /* XXX Add NLC flag indicating modifying operation? */
2645 if ((error = nlookup(nd)) != 0)
2646 return (error);
2647 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
2648 return (error);
2649 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2650 return (error);
2651 error = setutimes(vp, ts, tptr == NULL);
2652 vrele(vp);
2653 return (error);
2657 * utimes_args(char *path, struct timeval *tptr)
2659 * Set the access and modification times of a file.
2662 sys_utimes(struct utimes_args *uap)
2664 struct timeval tv[2];
2665 struct nlookupdata nd;
2666 int error;
2668 if (uap->tptr) {
2669 error = copyin(uap->tptr, tv, sizeof(tv));
2670 if (error)
2671 return (error);
2673 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2674 if (error == 0)
2675 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2676 nlookup_done(&nd);
2677 return (error);
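/*
 * Illustrative userland sketch (editorial addition): utimes(2) as handled
 * by kern_utimes() above.  times[0] is the access time, times[1] the
 * modification time; a NULL pointer makes getutimes() use the current
 * time and sets VA_UTIMES_NULL, which (filesystem permitting) lets the
 * owner or anyone with write permission update the timestamps.  The path
 * is an example only.
 *
 *	#include <sys/time.h>
 *
 *	struct timeval tv[2];
 *	gettimeofday(&tv[0], NULL);
 *	tv[1] = tv[0];
 *	utimes("/tmp/stamp", tv);
 *	utimes("/tmp/stamp", NULL);
 */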
2681 * lutimes_args(char *path, struct timeval *tptr)
2683 * Set the access and modification times of a file.
2686 sys_lutimes(struct lutimes_args *uap)
2688 struct timeval tv[2];
2689 struct nlookupdata nd;
2690 int error;
2692 if (uap->tptr) {
2693 error = copyin(uap->tptr, tv, sizeof(tv));
2694 if (error)
2695 return (error);
2697 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2698 if (error == 0)
2699 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2700 nlookup_done(&nd);
2701 return (error);
2705 kern_futimes(int fd, struct timeval *tptr)
2707 struct thread *td = curthread;
2708 struct proc *p = td->td_proc;
2709 struct timespec ts[2];
2710 struct file *fp;
2711 int error;
2713 error = getutimes(tptr, ts);
2714 if (error)
2715 return (error);
2716 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
2717 return (error);
2718 if (fp->f_nchandle.ncp)
2719 error = ncp_writechk(&fp->f_nchandle);
2720 if (error == 0)
2721 error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
2722 fdrop(fp);
2723 return (error);
2727 * futimes_args(int fd, struct timeval *tptr)
2729 * Set the access and modification times of a file.
2732 sys_futimes(struct futimes_args *uap)
2734 struct timeval tv[2];
2735 int error;
2737 if (uap->tptr) {
2738 error = copyin(uap->tptr, tv, sizeof(tv));
2739 if (error)
2740 return (error);
2743 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
2745 return (error);
2749 kern_truncate(struct nlookupdata *nd, off_t length)
2751 struct vnode *vp;
2752 struct vattr vattr;
2753 int error;
2755 if (length < 0)
2756 return(EINVAL);
2757 /* XXX Add NLC flag indicating modifying operation? */
2758 if ((error = nlookup(nd)) != 0)
2759 return (error);
2760 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
2761 return (error);
2762 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2763 return (error);
2764 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) {
2765 vrele(vp);
2766 return (error);
2768 if (vp->v_type == VDIR) {
2769 error = EISDIR;
2770 } else if ((error = vn_writechk(vp, &nd->nl_nch)) == 0 &&
2771 (error = VOP_ACCESS(vp, VWRITE, nd->nl_cred)) == 0) {
2772 VATTR_NULL(&vattr);
2773 vattr.va_size = length;
2774 error = VOP_SETATTR(vp, &vattr, nd->nl_cred);
2776 vput(vp);
2777 return (error);
2781 * truncate(char *path, int pad, off_t length)
2783 * Truncate a file given its path name.
2786 sys_truncate(struct truncate_args *uap)
2788 struct nlookupdata nd;
2789 int error;
2791 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2792 if (error == 0)
2793 error = kern_truncate(&nd, uap->length);
2794 nlookup_done(&nd);
2795 return error;
2799 kern_ftruncate(int fd, off_t length)
2801 struct thread *td = curthread;
2802 struct proc *p = td->td_proc;
2803 struct vattr vattr;
2804 struct vnode *vp;
2805 struct file *fp;
2806 int error;
2808 if (length < 0)
2809 return(EINVAL);
2810 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
2811 return (error);
2812 if (fp->f_nchandle.ncp) {
2813 error = ncp_writechk(&fp->f_nchandle);
2814 if (error)
2815 goto done;
2817 if ((fp->f_flag & FWRITE) == 0) {
2818 error = EINVAL;
2819 goto done;
2821 vp = (struct vnode *)fp->f_data;
2822 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2823 if (vp->v_type == VDIR) {
2824 error = EISDIR;
2825 } else if ((error = vn_writechk(vp, NULL)) == 0) {
2826 VATTR_NULL(&vattr);
2827 vattr.va_size = length;
2828 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
2830 vn_unlock(vp);
2831 done:
2832 fdrop(fp);
2833 return (error);
2837 * ftruncate_args(int fd, int pad, off_t length)
2839 * Truncate a file given a file descriptor.
2842 sys_ftruncate(struct ftruncate_args *uap)
2844 int error;
2846 error = kern_ftruncate(uap->fd, uap->length);
2848 return (error);
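/*
 * Illustrative userland sketch (editorial addition): ftruncate(2) as
 * handled by kern_ftruncate() above.  The descriptor must be open for
 * writing (the FWRITE check), a negative length returns EINVAL and a
 * directory returns EISDIR.  The path is an example only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/logfile", O_RDWR);
 *	if (fd >= 0) {
 *		if (ftruncate(fd, 0) == -1)
 *			perror("ftruncate");
 *		close(fd);
 *	}
 */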
2852 * fsync(int fd)
2854 * Sync an open file.
2856 /* ARGSUSED */
2858 sys_fsync(struct fsync_args *uap)
2860 struct thread *td = curthread;
2861 struct proc *p = td->td_proc;
2862 struct vnode *vp;
2863 struct file *fp;
2864 vm_object_t obj;
2865 int error;
2867 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2868 return (error);
2869 vp = (struct vnode *)fp->f_data;
2870 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2871 if ((obj = vp->v_object) != NULL)
2872 vm_object_page_clean(obj, 0, 0, 0);
2873 if ((error = VOP_FSYNC(vp, MNT_WAIT)) == 0 &&
2874 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2875 bioops.io_fsync) {
2876 error = (*bioops.io_fsync)(vp);
2878 vn_unlock(vp);
2879 fdrop(fp);
2880 return (error);
2884 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
2886 struct nchandle fnchd;
2887 struct nchandle tnchd;
2888 struct namecache *ncp;
2889 struct vnode *fdvp;
2890 struct vnode *tdvp;
2891 struct mount *mp;
2892 int error;
2894 bwillwrite();
2895 if ((error = nlookup(fromnd)) != 0)
2896 return (error);
2897 if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL)
2898 return (ENOENT);
2899 fnchd.mount = fromnd->nl_nch.mount;
2900 cache_hold(&fnchd);
2903 * unlock the source nch so we can lookup the target nch without
2904 * deadlocking. The target may or may not exist so we do not check
2905 * for a target vp like kern_mkdir() and other creation functions do.
2907 * The source and target directories are ref'd and rechecked after
2908 * everything is relocked to determine if the source or target file
2909 * has been renamed.
2911 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
2912 fromnd->nl_flags &= ~NLC_NCPISLOCKED;
2913 cache_unlock(&fromnd->nl_nch);
2915 tond->nl_flags |= NLC_CREATE;
2916 if ((error = nlookup(tond)) != 0) {
2917 cache_drop(&fnchd);
2918 return (error);
2920 if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) {
2921 cache_drop(&fnchd);
2922 return (ENOENT);
2924 tnchd.mount = tond->nl_nch.mount;
2925 cache_hold(&tnchd);
2928 * If the source and target are the same there is nothing to do
2930 if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) {
2931 cache_drop(&fnchd);
2932 cache_drop(&tnchd);
2933 return (0);
2937 * Mount points cannot be renamed or overwritten
2939 if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) &
2940 NCF_ISMOUNTPT
2942 cache_drop(&fnchd);
2943 cache_drop(&tnchd);
2944 return (EINVAL);
2948 * relock the source ncp. NOTE AFTER RELOCKING: the source ncp
2949 * may have become invalid while it was unlocked, nc_vp and nc_mount
2950 * could be NULL.
2952 if (cache_lock_nonblock(&fromnd->nl_nch) == 0) {
2953 cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
2954 } else if (fromnd->nl_nch.ncp > tond->nl_nch.ncp) {
2955 cache_lock(&fromnd->nl_nch);
2956 cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
2957 } else {
2958 cache_unlock(&tond->nl_nch);
2959 cache_lock(&fromnd->nl_nch);
2960 cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
2961 cache_lock(&tond->nl_nch);
2962 cache_resolve(&tond->nl_nch, tond->nl_cred);
2964 fromnd->nl_flags |= NLC_NCPISLOCKED;
2967 * make sure the parent directories' linkages are the same
2969 if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent ||
2970 tnchd.ncp != tond->nl_nch.ncp->nc_parent) {
2971 cache_drop(&fnchd);
2972 cache_drop(&tnchd);
2973 return (ENOENT);
2977 * Both the source and target must be within the same filesystem and
2978 * in the same filesystem as their parent directories within the
2979 * namecache topology.
2981 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
2983 mp = fnchd.mount;
2984 if (mp != tnchd.mount || mp != fromnd->nl_nch.mount ||
2985 mp != tond->nl_nch.mount) {
2986 cache_drop(&fnchd);
2987 cache_drop(&tnchd);
2988 return (EXDEV);
2992 * Make sure the mount point is writable
2994 if ((error = ncp_writechk(&tond->nl_nch)) != 0) {
2995 cache_drop(&fnchd);
2996 cache_drop(&tnchd);
2997 return (error);
3001 * If the target exists and either the source or target is a directory,
3002 * then both must be directories.
3004 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might
3005 * have become NULL.
3007 if (tond->nl_nch.ncp->nc_vp) {
3008 if (fromnd->nl_nch.ncp->nc_vp == NULL) {
3009 error = ENOENT;
3010 } else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) {
3011 if (tond->nl_nch.ncp->nc_vp->v_type != VDIR)
3012 error = ENOTDIR;
3013 } else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) {
3014 error = EISDIR;
3019 * You cannot rename a source into itself or a subdirectory of itself.
3020 * We check this by traversing the target directory upwards looking
3021 * for a match against the source.
3023 if (error == 0) {
3024 for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) {
3025 if (fromnd->nl_nch.ncp == ncp) {
3026 error = EINVAL;
3027 break;
3032 cache_drop(&fnchd);
3033 cache_drop(&tnchd);
3036 * Even though the namespaces are different, they may still represent
3037 * hardlinks to the same file. The filesystem might have a hard time
3038 * with this so we issue a NREMOVE of the source instead of a NRENAME
3039 * when we detect the situation.
3041 if (error == 0) {
3042 fdvp = fromnd->nl_nch.ncp->nc_parent->nc_vp;
3043 tdvp = tond->nl_nch.ncp->nc_parent->nc_vp;
3044 if (fdvp == NULL || tdvp == NULL) {
3045 error = EPERM;
3046 } else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) {
3047 /* vhold(fdvp); - dvp can't go away */
3048 error = VOP_NREMOVE(&fromnd->nl_nch, fdvp,
3049 fromnd->nl_cred);
3050 /* vdrop(fdvp); */
3051 } else {
3052 /* vhold(fdvp); - dvp can't go away */
3053 /* vhold(tdvp); - dvp can't go away */
3054 error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch,
3055 fdvp, tdvp, tond->nl_cred);
3056 /* vdrop(fdvp); */
3057 /* vdrop(tdvp); */
3060 return (error);
3064 * rename_args(char *from, char *to)
3066 * Rename files. Source and destination must either both be directories,
3067 * or both not be directories. If target is a directory, it must be empty.
3070 sys_rename(struct rename_args *uap)
3072 struct nlookupdata fromnd, tond;
3073 int error;
3075 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
3076 if (error == 0) {
3077 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
3078 if (error == 0)
3079 error = kern_rename(&fromnd, &tond);
3080 nlookup_done(&tond);
3082 nlookup_done(&fromnd);
3083 return (error);
3087 kern_mkdir(struct nlookupdata *nd, int mode)
3089 struct thread *td = curthread;
3090 struct proc *p = td->td_proc;
3091 struct vnode *vp;
3092 struct vnode *dvp;
3093 struct vattr vattr;
3094 int error;
3096 bwillwrite();
3097 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE;
3098 if ((error = nlookup(nd)) != 0)
3099 return (error);
3101 if (nd->nl_nch.ncp->nc_vp)
3102 return (EEXIST);
3103 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
3104 return (error);
3105 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
3106 return (EPERM);
3107 /* vhold(dvp); - dvp can't go away */
3108 VATTR_NULL(&vattr);
3109 vattr.va_type = VDIR;
3110 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;
3112 vp = NULL;
3113 error = VOP_NMKDIR(&nd->nl_nch, dvp, &vp, p->p_ucred, &vattr);
3114 /* vdrop(dvp); */
3115 if (error == 0)
3116 vput(vp);
3117 return (error);
3121 * mkdir_args(char *path, int mode)
3123 * Make a directory file.
3125 /* ARGSUSED */
3127 sys_mkdir(struct mkdir_args *uap)
3129 struct nlookupdata nd;
3130 int error;
3132 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
3133 if (error == 0)
3134 error = kern_mkdir(&nd, uap->mode);
3135 nlookup_done(&nd);
3136 return (error);
3140 kern_rmdir(struct nlookupdata *nd)
3142 struct vnode *dvp;
3143 int error;
3145 bwillwrite();
3146 nd->nl_flags |= NLC_DELETE;
3147 if ((error = nlookup(nd)) != 0)
3148 return (error);
3151 * Do not allow directories representing mount points to be
3152 * deleted, even if empty. Check write perms on mount point
3153 * in case the vnode is aliased (aka nullfs).
3155 if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT))
3156 return (EINVAL);
3157 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
3158 return (error);
3159 if ((dvp = nd->nl_nch.ncp->nc_parent->nc_vp) == NULL)
3160 return (EPERM);
3161 /* vhold(dvp); - dvp can't go away */
3162 error = VOP_NRMDIR(&nd->nl_nch, dvp, nd->nl_cred);
3163 /* vdrop(dvp); */
3164 return (error);
3168 * rmdir_args(char *path)
3170 * Remove a directory file.
3172 /* ARGSUSED */
3174 sys_rmdir(struct rmdir_args *uap)
3176 struct nlookupdata nd;
3177 int error;
3179 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
3180 if (error == 0)
3181 error = kern_rmdir(&nd);
3182 nlookup_done(&nd);
3183 return (error);
3187 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
3188 enum uio_seg direction)
3190 struct thread *td = curthread;
3191 struct proc *p = td->td_proc;
3192 struct vnode *vp;
3193 struct file *fp;
3194 struct uio auio;
3195 struct iovec aiov;
3196 long loff;
3197 int error, eofflag;
3199 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
3200 return (error);
3201 if ((fp->f_flag & FREAD) == 0) {
3202 error = EBADF;
3203 goto done;
3205 vp = (struct vnode *)fp->f_data;
3206 unionread:
3207 if (vp->v_type != VDIR) {
3208 error = EINVAL;
3209 goto done;
3211 aiov.iov_base = buf;
3212 aiov.iov_len = count;
3213 auio.uio_iov = &aiov;
3214 auio.uio_iovcnt = 1;
3215 auio.uio_rw = UIO_READ;
3216 auio.uio_segflg = direction;
3217 auio.uio_td = td;
3218 auio.uio_resid = count;
3219 loff = auio.uio_offset = fp->f_offset;
3220 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
3221 fp->f_offset = auio.uio_offset;
3222 if (error)
3223 goto done;
3224 if (count == auio.uio_resid) {
3225 if (union_dircheckp) {
3226 error = union_dircheckp(td, &vp, fp);
3227 if (error == -1)
3228 goto unionread;
3229 if (error)
3230 goto done;
3232 #if 0
3233 if ((vp->v_flag & VROOT) &&
3234 (vp->v_mount->mnt_flag & MNT_UNION)) {
3235 struct vnode *tvp = vp;
3236 vp = vp->v_mount->mnt_vnodecovered;
3237 vref(vp);
3238 fp->f_data = vp;
3239 fp->f_offset = 0;
3240 vrele(tvp);
3241 goto unionread;
3243 #endif
3245 if (basep) {
3246 *basep = loff;
3248 *res = count - auio.uio_resid;
3249 done:
3250 fdrop(fp);
3251 return (error);
3255 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
3257 * Read a block of directory entries in a file system independent format.
3260 sys_getdirentries(struct getdirentries_args *uap)
3262 long base;
3263 int error;
3265 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
3266 &uap->sysmsg_result, UIO_USERSPACE);
3268 if (error == 0)
3269 error = copyout(&base, uap->basep, sizeof(*uap->basep));
3270 return (error);
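/*
 * Illustrative userland sketch (editorial addition): consuming the byte
 * stream produced by kern_getdirentries() above.  The return value is
 * the number of bytes placed in the buffer and entries are walked via
 * d_reclen.  The buffer size and dirent field names follow the classic
 * BSD layout and are assumptions of this sketch.
 *
 *	#include <sys/types.h>
 *	#include <dirent.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	long base;
 *	int n, fd = open(".", O_RDONLY);
 *
 *	while ((n = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
 *		char *cp = buf;
 *		while (cp < buf + n) {
 *			struct dirent *dp = (struct dirent *)cp;
 *			printf("%s\n", dp->d_name);
 *			cp += dp->d_reclen;
 *		}
 *	}
 *	close(fd);
 */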
3274 * getdents_args(int fd, char *buf, size_t count)
3277 sys_getdents(struct getdents_args *uap)
3279 int error;
3281 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
3282 &uap->sysmsg_result, UIO_USERSPACE);
3284 return (error);
3288 * umask(int newmask)
3290 * Set the mode mask for creation of filesystem nodes.
3292 * MP SAFE
3295 sys_umask(struct umask_args *uap)
3297 struct thread *td = curthread;
3298 struct proc *p = td->td_proc;
3299 struct filedesc *fdp;
3301 fdp = p->p_fd;
3302 uap->sysmsg_result = fdp->fd_cmask;
3303 fdp->fd_cmask = uap->newmask & ALLPERMS;
3304 return (0);
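/*
 * Illustrative userland sketch (editorial addition): umask(2) above
 * simply swaps fd_cmask and returns the previous mask; the mask is
 * cleared from the mode passed to open(2), mkdir(2) and friends in the
 * creation paths (mode & ~fd_cmask).  With a mask of 022, a request for
 * 0666 yields a 0644 file.
 *
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *
 *	mode_t old = umask(022);
 *	int fd = open("/tmp/newfile", O_CREAT | O_WRONLY, 0666);
 *	umask(old);
 */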
3308 * revoke(char *path)
3310 * Void all references to file by ripping underlying filesystem
3311 * away from vnode.
3313 /* ARGSUSED */
3315 sys_revoke(struct revoke_args *uap)
3317 struct nlookupdata nd;
3318 struct vattr vattr;
3319 struct vnode *vp;
3320 struct ucred *cred;
3321 int error;
3323 vp = NULL;
3324 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3325 if (error == 0)
3326 error = nlookup(&nd);
3327 if (error == 0)
3328 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
3329 cred = crhold(nd.nl_cred);
3330 nlookup_done(&nd);
3331 if (error == 0) {
3332 if (vp->v_type != VCHR && vp->v_type != VBLK)
3333 error = EINVAL;
3334 if (error == 0)
3335 error = VOP_GETATTR(vp, &vattr);
3336 if (error == 0 && cred->cr_uid != vattr.va_uid)
3337 error = suser_cred(cred, PRISON_ROOT);
3338 if (error == 0 && count_udev(vp->v_umajor, vp->v_uminor) > 0) {
3339 error = 0;
3340 vx_lock(vp);
3341 VOP_REVOKE(vp, REVOKEALL);
3342 vx_unlock(vp);
3344 vrele(vp);
3346 if (cred)
3347 crfree(cred);
3348 return (error);
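/*
 * Illustrative userland sketch (editorial addition): revoke(2) above
 * only accepts character and block devices and only from the node's
 * owner or the superuser; a typical use is forcing all descriptors
 * still open on a terminal to become invalid.  The device path is an
 * example only.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	if (revoke("/dev/ttyp3") == -1)
 *		perror("revoke");
 */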
3352 * getfh_args(char *fname, fhandle_t *fhp)
3354 * Get (NFS) file handle
3357 sys_getfh(struct getfh_args *uap)
3359 struct thread *td = curthread;
3360 struct nlookupdata nd;
3361 fhandle_t fh;
3362 struct vnode *vp;
3363 int error;
3366 * Must be super user
3368 if ((error = suser(td)) != 0)
3369 return (error);
3371 vp = NULL;
3372 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
3373 if (error == 0)
3374 error = nlookup(&nd);
3375 if (error == 0)
3376 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3377 nlookup_done(&nd);
3378 if (error == 0) {
3379 bzero(&fh, sizeof(fh));
3380 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
3381 error = VFS_VPTOFH(vp, &fh.fh_fid);
3382 vput(vp);
3383 if (error == 0)
3384 error = copyout(&fh, uap->fhp, sizeof(fh));
3386 return (error);
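/*
 * Illustrative userland sketch (editorial addition): getfh(2) above is
 * restricted to the superuser and fills in a fhandle_t (the mount's
 * fsid plus the fid from VFS_VPTOFH) for an existing file.  The path
 * is an example only.
 *
 *	#include <sys/param.h>
 *	#include <sys/mount.h>
 *	#include <stdio.h>
 *
 *	fhandle_t fh;
 *	if (getfh("/export/data/file", &fh) == -1)
 *		perror("getfh");
 */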
3390 * fhopen_args(const struct fhandle *u_fhp, int flags)
3392 * syscall for the rpc.lockd to use to translate an NFS file handle into
3393 * an open descriptor.
3395 * warning: do not remove the suser() call or this becomes one giant
3396 * security hole.
3399 sys_fhopen(struct fhopen_args *uap)
3401 struct thread *td = curthread;
3402 struct proc *p = td->td_proc;
3403 struct mount *mp;
3404 struct vnode *vp;
3405 struct fhandle fhp;
3406 struct vattr vat;
3407 struct vattr *vap = &vat;
3408 struct flock lf;
3409 int fmode, mode, error, type;
3410 struct file *nfp;
3411 struct file *fp;
3412 int indx;
3415 * Must be super user
3417 error = suser(td);
3418 if (error)
3419 return (error);
3421 fmode = FFLAGS(uap->flags);
3422 /* why not allow a non-read/write open for our lockd? */
3423 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
3424 return (EINVAL);
3425 error = copyin(uap->u_fhp, &fhp, sizeof(fhp));
3426 if (error)
3427 return(error);
3428 /* find the mount point */
3429 mp = vfs_getvfs(&fhp.fh_fsid);
3430 if (mp == NULL)
3431 return (ESTALE);
3432 /* now give me my vnode, it gets returned to me locked */
3433 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
3434 if (error)
3435 return (error);
3437 * From now on we have to make sure not
3438 * to forget about the vnode.
3439 * Any error that causes an abort must vput(vp);
3440 * just set error = err and 'goto bad;'.
3444 * from vn_open
3446 if (vp->v_type == VLNK) {
3447 error = EMLINK;
3448 goto bad;
3450 if (vp->v_type == VSOCK) {
3451 error = EOPNOTSUPP;
3452 goto bad;
3454 mode = 0;
3455 if (fmode & (FWRITE | O_TRUNC)) {
3456 if (vp->v_type == VDIR) {
3457 error = EISDIR;
3458 goto bad;
3460 error = vn_writechk(vp, NULL);
3461 if (error)
3462 goto bad;
3463 mode |= VWRITE;
3465 if (fmode & FREAD)
3466 mode |= VREAD;
3467 if (mode) {
3468 error = VOP_ACCESS(vp, mode, p->p_ucred);
3469 if (error)
3470 goto bad;
3472 if (fmode & O_TRUNC) {
3473 vn_unlock(vp); /* XXX */
3474 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
3475 VATTR_NULL(vap);
3476 vap->va_size = 0;
3477 error = VOP_SETATTR(vp, vap, p->p_ucred);
3478 if (error)
3479 goto bad;
3483 * VOP_OPEN needs the file pointer so it can potentially override
3484 * it.
3486 * WARNING! no f_nchandle will be associated when fhopen()ing a
3487 * directory. XXX
3489 if ((error = falloc(p, &nfp, &indx)) != 0)
3490 goto bad;
3491 fp = nfp;
3493 error = VOP_OPEN(vp, fmode, p->p_ucred, fp);
3494 if (error) {
3496 * setting f_ops this way prevents VOP_CLOSE from being
3497 * called or fdrop() releasing the vp from v_data. Since
3498 * the VOP_OPEN failed we don't want to VOP_CLOSE.
3500 fp->f_ops = &badfileops;
3501 fp->f_data = NULL;
3502 goto bad_drop;
3506 * The fp is given its own reference, we still have our ref and lock.
3508 * Assert that all regular files must be created with a VM object.
3510 if (vp->v_type == VREG && vp->v_object == NULL) {
3511 kprintf("fhopen: regular file did not have VM object: %p\n", vp);
3512 goto bad_drop;
3516 * The open was successful. Handle any locking requirements.
3518 if (fmode & (O_EXLOCK | O_SHLOCK)) {
3519 lf.l_whence = SEEK_SET;
3520 lf.l_start = 0;
3521 lf.l_len = 0;
3522 if (fmode & O_EXLOCK)
3523 lf.l_type = F_WRLCK;
3524 else
3525 lf.l_type = F_RDLCK;
3526 if (fmode & FNONBLOCK)
3527 type = 0;
3528 else
3529 type = F_WAIT;
3530 vn_unlock(vp);
3531 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
3533 * release our private reference.
3535 fsetfd(p, NULL, indx);
3536 fdrop(fp);
3537 vrele(vp);
3538 return (error);
3540 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3541 fp->f_flag |= FHASLOCK;
3545 * Clean up. Associate the file pointer with the previously
3546 * reserved descriptor and return it.
3548 vput(vp);
3549 fsetfd(p, fp, indx);
3550 fdrop(fp);
3551 uap->sysmsg_result = indx;
3552 return (0);
3554 bad_drop:
3555 fsetfd(p, NULL, indx);
3556 fdrop(fp);
3557 bad:
3558 vput(vp);
3559 return (error);
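/*
 * Illustrative userland sketch (editorial addition): how a privileged
 * daemon such as rpc.lockd might turn a file handle back into an open
 * descriptor with fhopen(2) above.  The open must be for read and/or
 * write and O_CREAT is rejected; the handle would normally come from
 * getfh(2) or off the wire, so the zeroed handle below is only a
 * placeholder.
 *
 *	#include <sys/param.h>
 *	#include <sys/mount.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	fhandle_t fh;
 *	memset(&fh, 0, sizeof(fh));
 *	int fd = fhopen(&fh, O_RDWR);
 *	if (fd == -1)
 *		perror("fhopen");
 */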
3563 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
3566 sys_fhstat(struct fhstat_args *uap)
3568 struct thread *td = curthread;
3569 struct stat sb;
3570 fhandle_t fh;
3571 struct mount *mp;
3572 struct vnode *vp;
3573 int error;
3576 * Must be super user
3578 error = suser(td);
3579 if (error)
3580 return (error);
3582 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
3583 if (error)
3584 return (error);
3586 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3587 return (ESTALE);
3588 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3589 return (error);
3590 error = vn_stat(vp, &sb, td->td_proc->p_ucred);
3591 vput(vp);
3592 if (error)
3593 return (error);
3594 error = copyout(&sb, uap->sb, sizeof(sb));
3595 return (error);
3599 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
3602 sys_fhstatfs(struct fhstatfs_args *uap)
3604 struct thread *td = curthread;
3605 struct proc *p = td->td_proc;
3606 struct statfs *sp;
3607 struct mount *mp;
3608 struct vnode *vp;
3609 struct statfs sb;
3610 char *fullpath, *freepath;
3611 fhandle_t fh;
3612 int error;
3615 * Must be super user
3617 if ((error = suser(td)))
3618 return (error);
3620 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
3621 return (error);
3623 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3624 return (ESTALE);
3626 if (p != NULL && !chroot_visible_mnt(mp, p))
3627 return (ESTALE);
3629 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3630 return (error);
3631 mp = vp->v_mount;
3632 sp = &mp->mnt_stat;
3633 vput(vp);
3634 if ((error = VFS_STATFS(mp, sp, p->p_ucred)) != 0)
3635 return (error);
3637 error = mount_path(p, mp, &fullpath, &freepath);
3638 if (error)
3639 return(error);
3640 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
3641 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
3642 kfree(freepath, M_TEMP);
3644 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3645 if (suser(td)) {
3646 bcopy(sp, &sb, sizeof(sb));
3647 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3648 sp = &sb;
3650 return (copyout(sp, uap->buf, sizeof(*sp)));
3654 * Syscall to push extended attribute configuration information into the
3655 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3656 * a command (int cmd), and attribute name and misc data. For now, the
3657 * attribute name is left in userspace for consumption by the VFS_op.
3658 * It will probably be changed to be copied into sysspace by the
3659 * syscall in the future, once issues with various consumers of the
3660 * attribute code have raised their hands.
3662 * Currently this is used only by UFS Extended Attributes.
3665 sys_extattrctl(struct extattrctl_args *uap)
3667 struct nlookupdata nd;
3668 struct mount *mp;
3669 struct vnode *vp;
3670 int error;
3672 vp = NULL;
3673 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3674 if (error == 0)
3675 error = nlookup(&nd);
3676 if (error == 0) {
3677 mp = nd.nl_nch.mount;
3678 error = VFS_EXTATTRCTL(mp, uap->cmd,
3679 uap->attrname, uap->arg,
3680 nd.nl_cred);
3682 nlookup_done(&nd);
3683 return (error);
3687 * Syscall to set a named extended attribute on a file or directory.
3688 * Accepts attribute name, and a uio structure pointing to the data to set.
3689 * The uio is consumed in the style of writev(). The real work happens
3690 * in VOP_SETEXTATTR().
3693 sys_extattr_set_file(struct extattr_set_file_args *uap)
3695 char attrname[EXTATTR_MAXNAMELEN];
3696 struct iovec aiov[UIO_SMALLIOV];
3697 struct iovec *needfree;
3698 struct nlookupdata nd;
3699 struct iovec *iov;
3700 struct vnode *vp;
3701 struct uio auio;
3702 u_int iovlen;
3703 u_int cnt;
3704 int error;
3705 int i;
3707 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3708 if (error)
3709 return (error);
3711 vp = NULL;
3712 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3713 if (error == 0)
3714 error = nlookup(&nd);
3715 if (error == 0)
3716 error = ncp_writechk(&nd.nl_nch);
3717 if (error == 0)
3718 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3719 if (error) {
3720 nlookup_done(&nd);
3721 return (error);
3724 needfree = NULL;
3725 iovlen = uap->iovcnt * sizeof(struct iovec);
3726 if (uap->iovcnt > UIO_SMALLIOV) {
3727 if (uap->iovcnt > UIO_MAXIOV) {
3728 error = EINVAL;
3729 goto done;
3731 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3732 needfree = iov;
3733 } else {
3734 iov = aiov;
3736 auio.uio_iov = iov;
3737 auio.uio_iovcnt = uap->iovcnt;
3738 auio.uio_rw = UIO_WRITE;
3739 auio.uio_segflg = UIO_USERSPACE;
3740 auio.uio_td = nd.nl_td;
3741 auio.uio_offset = 0;
3742 if ((error = copyin(uap->iovp, iov, iovlen)))
3743 goto done;
3744 auio.uio_resid = 0;
3745 for (i = 0; i < uap->iovcnt; i++) {
3746 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3747 error = EINVAL;
3748 goto done;
3750 auio.uio_resid += iov->iov_len;
3751 iov++;
3753 cnt = auio.uio_resid;
3754 error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3755 cnt -= auio.uio_resid;
3756 uap->sysmsg_result = cnt;
3757 done:
3758 vput(vp);
3759 nlookup_done(&nd);
3760 if (needfree)
3761 FREE(needfree, M_IOV);
3762 return (error);
3766 * Syscall to get a named extended attribute on a file or directory.
3767 * Accepts attribute name, and a uio structure pointing to a buffer for the
3768 * data. The uio is consumed in the style of readv(). The real work
3769 * happens in VOP_GETEXTATTR().
3772 sys_extattr_get_file(struct extattr_get_file_args *uap)
3774 char attrname[EXTATTR_MAXNAMELEN];
3775 struct iovec aiov[UIO_SMALLIOV];
3776 struct iovec *needfree;
3777 struct nlookupdata nd;
3778 struct iovec *iov;
3779 struct vnode *vp;
3780 struct uio auio;
3781 u_int iovlen;
3782 u_int cnt;
3783 int error;
3784 int i;
3786 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3787 if (error)
3788 return (error);
3790 vp = NULL;
3791 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3792 if (error == 0)
3793 error = nlookup(&nd);
3794 if (error == 0)
3795 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3796 if (error) {
3797 nlookup_done(&nd);
3798 return (error);
3801 iovlen = uap->iovcnt * sizeof (struct iovec);
3802 needfree = NULL;
3803 if (uap->iovcnt > UIO_SMALLIOV) {
3804 if (uap->iovcnt > UIO_MAXIOV) {
3805 error = EINVAL;
3806 goto done;
3808 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3809 needfree = iov;
3810 } else {
3811 iov = aiov;
3813 auio.uio_iov = iov;
3814 auio.uio_iovcnt = uap->iovcnt;
3815 auio.uio_rw = UIO_READ;
3816 auio.uio_segflg = UIO_USERSPACE;
3817 auio.uio_td = nd.nl_td;
3818 auio.uio_offset = 0;
3819 if ((error = copyin(uap->iovp, iov, iovlen)))
3820 goto done;
3821 auio.uio_resid = 0;
3822 for (i = 0; i < uap->iovcnt; i++) {
3823 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3824 error = EINVAL;
3825 goto done;
3827 auio.uio_resid += iov->iov_len;
3828 iov++;
3830 cnt = auio.uio_resid;
3831 error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3832 cnt -= auio.uio_resid;
3833 uap->sysmsg_result = cnt;
3834 done:
3835 vput(vp);
3836 nlookup_done(&nd);
3837 if (needfree)
3838 FREE(needfree, M_IOV);
3839 return(error);
3843 * Syscall to delete a named extended attribute from a file or directory.
3844 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3847 sys_extattr_delete_file(struct extattr_delete_file_args *uap)
3849 char attrname[EXTATTR_MAXNAMELEN];
3850 struct nlookupdata nd;
3851 struct vnode *vp;
3852 int error;
3854 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3855 if (error)
3856 return(error);
3858 vp = NULL;
3859 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3860 if (error == 0)
3861 error = nlookup(&nd);
3862 if (error == 0)
3863 error = ncp_writechk(&nd.nl_nch);
3864 if (error == 0)
3865 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3866 if (error) {
3867 nlookup_done(&nd);
3868 return (error);
3871 error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred);
3872 vput(vp);
3873 nlookup_done(&nd);
3874 return(error);
3878 * Determine if the mount is visible to the process.
3880 static int
3881 chroot_visible_mnt(struct mount *mp, struct proc *p)
3883 struct nchandle nch;
3886 * Traverse from the mount point upwards. If we hit the process
3887 * root then the mount point is visible to the process.
3889 nch = mp->mnt_ncmountpt;
3890 while (nch.ncp) {
3891 if (nch.mount == p->p_fd->fd_nrdir.mount &&
3892 nch.ncp == p->p_fd->fd_nrdir.ncp) {
3893 return(1);
3895 if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
3896 nch = nch.mount->mnt_ncmounton;
3897 } else {
3898 nch.ncp = nch.ncp->nc_parent;
3903 * If the mount point is not visible to the process, but the
3904 * process root is in a subdirectory of the mount, return
3905 * TRUE anyway.
3907 if (p->p_fd->fd_nrdir.mount == mp)
3908 return(1);
3910 return(0);