kernel - TMPFS - Bug fixing pass - paging to/from swap, vnode recycling
dragonfly.git: sys/vfs/tmpfs/tmpfs_vnops.c
1 /* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */
3 /*-
4 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 * 2005 program.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
34 * tmpfs vnode interface.
36 #include <sys/cdefs.h>
38 #include <sys/kernel.h>
39 #include <sys/kern_syscall.h>
40 #include <sys/param.h>
41 #include <sys/fcntl.h>
42 #include <sys/lockf.h>
43 #include <sys/priv.h>
44 #include <sys/proc.h>
45 #include <sys/resourcevar.h>
46 #include <sys/sched.h>
47 #include <sys/sfbuf.h>
48 #include <sys/stat.h>
49 #include <sys/systm.h>
50 #include <sys/unistd.h>
51 #include <sys/vfsops.h>
52 #include <sys/vnode.h>
54 #include <sys/mplock2.h>
56 #include <vm/vm.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_pager.h>
61 #include <vfs/fifofs/fifo.h>
62 #include <vfs/tmpfs/tmpfs_vnops.h>
63 #include <vfs/tmpfs/tmpfs.h>
65 MALLOC_DECLARE(M_TMPFS);
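/*
 * Overview of regular-file I/O in this implementation: tmpfs_read() and
 * tmpfs_write() go through the buffer cache in BSIZE chunks (BSIZE and
 * BMASK are assumed to be provided by tmpfs.h), and tmpfs_strategy()
 * hands the resulting buffer I/O to the swap pager, so dirty tmpfs data
 * can be paged out to swap when buffers are flushed or when the backing
 * VM object is cleaned during pageout or vnode recycling.
 */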
67 /* --------------------------------------------------------------------- */
69 static int
70 tmpfs_nresolve(struct vop_nresolve_args *v)
72 struct vnode *dvp = v->a_dvp;
73 struct vnode *vp = NULL;
74 struct namecache *ncp = v->a_nch->ncp;
75 struct tmpfs_node *tnode;
77 int error;
78 struct tmpfs_dirent *de;
79 struct tmpfs_node *dnode;
81 dnode = VP_TO_TMPFS_DIR(dvp);
83 de = tmpfs_dir_lookup(dnode, NULL, ncp);
84 if (de == NULL) {
85 error = ENOENT;
86 } else {
88 * Allocate a vnode for the node we found.
90 tnode = de->td_node;
91 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
92 LK_EXCLUSIVE | LK_RETRY, &vp);
93 if (error)
94 goto out;
95 KKASSERT(vp);
98 out:
100 * Store the result of this lookup in the cache. Avoid this if the
101 * request was for creation, as it does not improve timings on
102 * empirical tests.
104 if (vp) {
105 vn_unlock(vp);
106 cache_setvp(v->a_nch, vp);
107 vrele(vp);
108 } else if (error == ENOENT) {
109 cache_setvp(v->a_nch, NULL);
111 return error;
114 static int
115 tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *v)
117 struct vnode *dvp = v->a_dvp;
118 struct vnode **vpp = v->a_vpp;
119 struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(dvp);
120 struct ucred *cred = v->a_cred;
121 int error;
123 *vpp = NULL;
124 /* Check accessibility of requested node as a first step. */
125 error = VOP_ACCESS(dvp, VEXEC, cred);
126 if (error != 0)
127 return error;
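/*
 * ".." resolves to the parent recorded in the directory node.  A NULL
 * tn_parent presumably means the directory has already been removed; in
 * that case no vnode is allocated and ENOENT is returned below.
 */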
129 if (dnode->tn_dir.tn_parent != NULL) {
130 /* Allocate a new vnode on the matching entry. */
131 error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
132 LK_EXCLUSIVE | LK_RETRY, vpp);
134 if (*vpp)
135 vn_unlock(*vpp);
138 return (*vpp == NULL) ? ENOENT : 0;
141 /* --------------------------------------------------------------------- */
143 static int
144 tmpfs_ncreate(struct vop_ncreate_args *v)
146 struct vnode *dvp = v->a_dvp;
147 struct vnode **vpp = v->a_vpp;
148 struct namecache *ncp = v->a_nch->ncp;
149 struct vattr *vap = v->a_vap;
150 struct ucred *cred = v->a_cred;
151 int error;
153 KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
155 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
156 if (error == 0) {
157 cache_setunresolved(v->a_nch);
158 cache_setvp(v->a_nch, *vpp);
161 return error;
163 /* --------------------------------------------------------------------- */
165 static int
166 tmpfs_nmknod(struct vop_nmknod_args *v)
168 struct vnode *dvp = v->a_dvp;
169 struct vnode **vpp = v->a_vpp;
170 struct namecache *ncp = v->a_nch->ncp;
171 struct vattr *vap = v->a_vap;
172 struct ucred *cred = v->a_cred;
173 int error;
175 if (vap->va_type != VBLK && vap->va_type != VCHR &&
176 vap->va_type != VFIFO)
177 return EINVAL;
179 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
180 if (error == 0) {
181 cache_setunresolved(v->a_nch);
182 cache_setvp(v->a_nch, *vpp);
185 return error;
188 /* --------------------------------------------------------------------- */
190 static int
191 tmpfs_open(struct vop_open_args *v)
193 struct vnode *vp = v->a_vp;
194 int mode = v->a_mode;
196 int error;
197 struct tmpfs_node *node;
199 node = VP_TO_TMPFS_NODE(vp);
201 /* The file is still active but all its names have been removed
202 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
203 * it is about to die. */
204 if (node->tn_links < 1)
205 return (ENOENT);
207 /* If the file is marked append-only, deny write requests. */
208 if ((node->tn_flags & APPEND) &&
209 (mode & (FWRITE | O_APPEND)) == FWRITE) {
210 error = EPERM;
211 } else {
212 return (vop_stdopen(v));
214 return error;
217 /* --------------------------------------------------------------------- */
219 static int
220 tmpfs_close(struct vop_close_args *v)
222 struct vnode *vp = v->a_vp;
223 struct tmpfs_node *node;
225 node = VP_TO_TMPFS_NODE(vp);
227 if (node->tn_links > 0) {
228 /* Update node times. No need to do it if the node has
229 * been deleted, because it will vanish after we return. */
230 tmpfs_update(vp);
233 return vop_stdclose(v);
236 /* --------------------------------------------------------------------- */
238 static int
239 tmpfs_access(struct vop_access_args *v)
241 struct vnode *vp = v->a_vp;
242 int error;
243 struct tmpfs_node *node;
245 node = VP_TO_TMPFS_NODE(vp);
247 switch (vp->v_type) {
248 case VDIR:
249 /* FALLTHROUGH */
250 case VLNK:
251 /* FALLTHROUGH */
252 case VREG:
253 if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
254 error = EROFS;
255 goto out;
257 break;
259 case VBLK:
260 /* FALLTHROUGH */
261 case VCHR:
262 /* FALLTHROUGH */
263 case VSOCK:
264 /* FALLTHROUGH */
265 case VFIFO:
266 break;
268 default:
269 error = EINVAL;
270 goto out;
273 if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) {
274 error = EPERM;
275 goto out;
278 error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0);
280 out:
282 return error;
285 /* --------------------------------------------------------------------- */
287 static int
288 tmpfs_getattr(struct vop_getattr_args *v)
290 struct vnode *vp = v->a_vp;
291 struct vattr *vap = v->a_vap;
292 struct tmpfs_node *node;
294 node = VP_TO_TMPFS_NODE(vp);
296 tmpfs_update(vp);
298 vap->va_type = vp->v_type;
299 vap->va_mode = node->tn_mode;
300 vap->va_nlink = node->tn_links;
301 vap->va_uid = node->tn_uid;
302 vap->va_gid = node->tn_gid;
303 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
304 vap->va_fileid = node->tn_id;
305 vap->va_size = node->tn_size;
306 vap->va_blocksize = PAGE_SIZE;
307 vap->va_atime.tv_sec = node->tn_atime;
308 vap->va_atime.tv_nsec = node->tn_atimensec;
309 vap->va_mtime.tv_sec = node->tn_mtime;
310 vap->va_mtime.tv_nsec = node->tn_mtimensec;
311 vap->va_ctime.tv_sec = node->tn_ctime;
312 vap->va_ctime.tv_nsec = node->tn_ctimensec;
313 vap->va_gen = node->tn_gen;
314 vap->va_flags = node->tn_flags;
315 if (vp->v_type == VBLK || vp->v_type == VCHR) {
317 vap->va_rmajor = umajor(node->tn_rdev);
318 vap->va_rminor = uminor(node->tn_rdev);
319 }
320 vap->va_bytes = round_page(node->tn_size);
321 vap->va_filerev = 0;
323 return 0;
326 /* --------------------------------------------------------------------- */
328 static int
329 tmpfs_setattr(struct vop_setattr_args *v)
331 struct vnode *vp = v->a_vp;
332 struct vattr *vap = v->a_vap;
333 struct ucred *cred = v->a_cred;
334 int error = 0;
336 if (error == 0 && (vap->va_flags != VNOVAL))
337 error = tmpfs_chflags(vp, vap->va_flags, cred);
339 if (error == 0 && (vap->va_size != VNOVAL))
340 error = tmpfs_chsize(vp, vap->va_size, cred);
342 if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
343 vap->va_gid != (gid_t)VNOVAL)) {
344 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
347 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
348 error = tmpfs_chmod(vp, vap->va_mode, cred);
350 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
351 vap->va_atime.tv_nsec != VNOVAL) ||
352 (vap->va_mtime.tv_sec != VNOVAL &&
353 vap->va_mtime.tv_nsec != VNOVAL) )) {
354 error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
355 vap->va_vaflags, cred);
358 /* Update the node times. We give preference to the error codes
359 * generated by this function rather than the ones that may arise
360 * from tmpfs_update. */
361 tmpfs_update(vp);
363 return error;
366 /* --------------------------------------------------------------------- */
369 * fsync is usually a NOP, but we must take action when unmounting or
370 * when recycling.
372 static int
373 tmpfs_fsync(struct vop_fsync_args *v)
375 struct tmpfs_mount *tmp;
376 struct tmpfs_node *node;
377 struct vnode *vp = v->a_vp;
379 tmp = VFS_TO_TMPFS(vp->v_mount);
380 node = VP_TO_TMPFS_NODE(vp);
382 tmpfs_update(vp);
383 if (vp->v_type == VREG) {
384 if (tmp->tm_flags & TMPFS_FLAG_UNMOUNTING) {
385 tmpfs_truncate(vp, 0);
386 } else if (vp->v_flag & VRECLAIMED) {
387 if (node->tn_links == 0)
388 tmpfs_truncate(vp, 0);
389 else
390 vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL);
393 return 0;
396 /* --------------------------------------------------------------------- */
398 static int
399 tmpfs_read (struct vop_read_args *ap)
401 struct buf *bp;
402 struct vnode *vp = ap->a_vp;
403 struct uio *uio = ap->a_uio;
404 struct tmpfs_node *node;
405 off_t base_offset;
406 size_t offset;
407 size_t len;
408 int got_mplock;
409 int error;
411 error = 0;
412 if (uio->uio_resid == 0) {
413 return error;
416 node = VP_TO_TMPFS_NODE(vp);
418 if (uio->uio_offset < 0)
419 return (EINVAL);
420 if (vp->v_type != VREG)
421 return (EINVAL);
423 #ifdef SMP
424 if (curthread->td_mpcount)
425 got_mplock = -1;
426 else
427 got_mplock = 0;
428 #else
429 got_mplock = -1;
430 #endif
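/*
 * got_mplock bookkeeping used below: -1 means the MP lock is already held
 * (or this is a non-SMP build) and needs no management here, 0 means it
 * has not been taken yet and is acquired lazily before calling bread(),
 * and a positive value means we took it and must rel_mplock() after the
 * loop.  Presumably the lock is only needed because the bread()/brelse()
 * paths are not yet MPSAFE.
 */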
432 while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
434 * Use buffer cache I/O (via tmpfs_strategy)
436 offset = (size_t)uio->uio_offset & BMASK;
437 base_offset = (off_t)uio->uio_offset - offset;
438 bp = getcacheblk(vp, base_offset);
439 if (bp == NULL)
441 if (got_mplock == 0) {
442 got_mplock = 1;
443 get_mplock();
446 error = bread(vp, base_offset, BSIZE, &bp);
447 if (error) {
448 brelse(bp);
449 kprintf("tmpfs_read bread error %d\n", error);
450 break;
454 if (got_mplock == 0) {
455 got_mplock = 1;
456 get_mplock();
460 * Figure out how many bytes we can actually copy this loop.
462 len = BSIZE - offset;
463 if (len > uio->uio_resid)
464 len = uio->uio_resid;
465 if (len > node->tn_size - uio->uio_offset)
466 len = (size_t)(node->tn_size - uio->uio_offset);
468 error = uiomove((char *)bp->b_data + offset, len, uio);
469 bqrelse(bp);
470 if (error) {
471 kprintf("tmpfs_read uiomove error %d\n", error);
472 break;
476 if (got_mplock > 0)
477 rel_mplock();
479 TMPFS_NODE_LOCK(node);
480 node->tn_status |= TMPFS_NODE_ACCESSED;
481 TMPFS_NODE_UNLOCK(node);
483 return(error);
486 static int
487 tmpfs_write (struct vop_write_args *ap)
489 struct buf *bp;
490 struct vnode *vp = ap->a_vp;
491 struct uio *uio = ap->a_uio;
492 struct thread *td = uio->uio_td;
493 struct tmpfs_node *node;
494 boolean_t extended;
495 off_t oldsize;
496 int error;
497 off_t base_offset;
498 size_t offset;
499 size_t len;
500 struct rlimit limit;
501 int got_mplock;
502 int trivial = 0;
504 error = 0;
505 if (uio->uio_resid == 0) {
506 return error;
509 node = VP_TO_TMPFS_NODE(vp);
511 if (vp->v_type != VREG)
512 return (EINVAL);
514 oldsize = node->tn_size;
515 if (ap->a_ioflag & IO_APPEND)
516 uio->uio_offset = node->tn_size;
519 * Check for illegal write offsets.
521 if (uio->uio_offset + uio->uio_resid >
522 VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
523 return (EFBIG);
525 if (vp->v_type == VREG && td != NULL) {
526 error = kern_getrlimit(RLIMIT_FSIZE, &limit);
527 if (error != 0)
528 return error;
529 if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
530 ksignal(td->td_proc, SIGXFSZ);
531 return (EFBIG);
537 * Extend the file's size if necessary
539 extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);
541 #ifdef SMP
542 if (curthread->td_mpcount) {
543 got_mplock = -1;
544 } else {
545 got_mplock = 1;
546 get_mplock();
548 #else
549 got_mplock = -1;
550 #endif
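/*
 * Unlike tmpfs_read(), the write path takes the MP lock up front when it
 * is not already held rather than lazily; -1 again means the lock was
 * already held (or a non-SMP build) and is not released at the end.
 */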
551 while (uio->uio_resid > 0) {
553 * Use buffer cache I/O (via tmpfs_strategy)
555 offset = (size_t)uio->uio_offset & BMASK;
556 base_offset = (off_t)uio->uio_offset - offset;
557 len = BSIZE - offset;
558 if (len > uio->uio_resid)
559 len = uio->uio_resid;
561 if ((uio->uio_offset + len) > node->tn_size) {
562 trivial = (uio->uio_offset <= node->tn_size);
563 error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
564 if (error)
565 break;
569 * Read to fill in any gaps. Theoretically we could
570 * optimize this if the write covers the entire buffer
571 * and is not a UIO_NOCOPY write, however this can lead
572 * to a security violation exposing random kernel memory
573 * (whatever junk was in the backing VM pages before).
575 * So just use bread() to do the right thing.
577 error = bread(vp, base_offset, BSIZE, &bp);
578 if (error == 0)
error = uiomove((char *)bp->b_data + offset, len, uio);
579 if (error) {
580 kprintf("tmpfs_write bread/uiomove error %d\n", error);
581 brelse(bp);
582 break;
585 if (uio->uio_offset > node->tn_size)
586 node->tn_size = uio->uio_offset;
589 * The data has been loaded into the buffer, write it out.
591 * We want tmpfs to be able to use all available ram, not
592 * just the buffer cache, so if not explicitly paging we
593 * use buwrite() to leave the buffer clean but mark all the
594 * VM pages valid+dirty.
596 * When the kernel is paging, either via normal pageout
597 * operation or when cleaning the object during a recycle,
598 * the underlying VM pages are going to get thrown away
599 * so we MUST write them to swap.
601 * XXX unfortunately this catches msync() system calls too
602 * for the moment.
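 *
 * (A UIO_NOCOPY write normally indicates the VM system flushing dirty
 * pages through the vnode pager, e.g. during pageout or msync(); those
 * are pushed to swap with bawrite() below, which for a plain msync() is
 * presumably unnecessary work since tmpfs has no stable backing store.)
 */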
604 if (ap->a_ioflag & IO_SYNC) {
605 bwrite(bp);
606 } else if ((ap->a_ioflag & IO_ASYNC) ||
607 (uio->uio_segflg == UIO_NOCOPY)) {
608 bawrite(bp);
609 } else {
610 buwrite(bp);
613 if (bp->b_error) {
614 kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
615 break;
619 if (got_mplock > 0)
620 rel_mplock();
622 if (error) {
623 if (extended)
624 (void)tmpfs_reg_resize(vp, oldsize, trivial);
625 return error;
628 TMPFS_NODE_LOCK(node);
629 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
630 (extended? TMPFS_NODE_CHANGED : 0);
632 if (node->tn_mode & (S_ISUID | S_ISGID)) {
633 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
634 node->tn_mode &= ~(S_ISUID | S_ISGID);
636 TMPFS_NODE_UNLOCK(node);
638 return(error);
641 static int
642 tmpfs_advlock (struct vop_advlock_args *ap)
644 struct tmpfs_node *node;
645 struct vnode *vp = ap->a_vp;
647 node = VP_TO_TMPFS_NODE(vp);
649 return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
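/*
 * tmpfs_strategy() below handles buffer cache I/O for regular files by
 * passing the BIO to the swap pager against the node's anonymous VM
 * object (tn_reg.tn_aobj); this is how tmpfs file data is written out to
 * and read back from swap.
 */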
653 static int
654 tmpfs_strategy(struct vop_strategy_args *ap)
656 struct bio *bio = ap->a_bio;
657 struct buf *bp = bio->bio_buf;
658 struct vnode *vp = ap->a_vp;
659 struct tmpfs_node *node;
660 vm_object_t uobj;
662 if (vp->v_type != VREG) {
663 bp->b_resid = bp->b_bcount;
664 bp->b_flags |= B_ERROR | B_INVAL;
665 bp->b_error = EINVAL;
666 biodone(bio);
667 return(0);
670 node = VP_TO_TMPFS_NODE(vp);
672 uobj = node->tn_reg.tn_aobj;
675 * Call swap_pager_strategy to read or write between the VM
676 * object and the buffer cache.
678 swap_pager_strategy(uobj, bio);
680 return 0;
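/*
 * tmpfs_bmap() reports an identity mapping (logical offset equals the
 * "physical" offset) with zero forward/backward run, since
 * tmpfs_strategy() operates on logical file offsets directly and there
 * is no underlying device block layout to translate to.
 */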
683 static int
684 tmpfs_bmap(struct vop_bmap_args *ap)
686 if (ap->a_doffsetp != NULL)
687 *ap->a_doffsetp = ap->a_loffset;
688 if (ap->a_runp != NULL)
689 *ap->a_runp = 0;
690 if (ap->a_runb != NULL)
691 *ap->a_runb = 0;
693 return 0;
696 /* --------------------------------------------------------------------- */
698 static int
699 tmpfs_nremove(struct vop_nremove_args *v)
701 struct vnode *dvp = v->a_dvp;
702 struct namecache *ncp = v->a_nch->ncp;
703 struct vnode *vp;
704 int error;
705 struct tmpfs_dirent *de;
706 struct tmpfs_mount *tmp;
707 struct tmpfs_node *dnode;
708 struct tmpfs_node *node;
711 * We have to acquire the vp from v->a_nch because
712 * we will likely unresolve the namecache entry, and
713 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
714 * sequence to recover space from the file.
716 error = cache_vref(v->a_nch, v->a_cred, &vp);
717 KKASSERT(error == 0);
719 if (vp->v_type == VDIR) {
720 error = EISDIR;
721 goto out;
724 dnode = VP_TO_TMPFS_DIR(dvp);
725 node = VP_TO_TMPFS_NODE(vp);
726 tmp = VFS_TO_TMPFS(vp->v_mount);
727 de = tmpfs_dir_lookup(dnode, node, ncp);
728 if (de == NULL) {
729 error = ENOENT;
730 goto out;
733 /* Files marked as immutable or append-only cannot be deleted. */
734 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
735 (dnode->tn_flags & APPEND)) {
736 error = EPERM;
737 goto out;
740 /* Remove the entry from the directory; as it is a file, we do not
741 * have to change the number of hard links of the directory. */
742 tmpfs_dir_detach(dnode, de);
744 /* Free the directory entry we just deleted. Note that the node
745 * referred by it will not be removed until the vnode is really
746 * reclaimed. */
747 tmpfs_free_dirent(tmp, de);
749 if (node->tn_links > 0) {
750 TMPFS_NODE_LOCK(node);
751 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
752 TMPFS_NODE_MODIFIED;
753 TMPFS_NODE_UNLOCK(node);
756 cache_setunresolved(v->a_nch);
757 cache_setvp(v->a_nch, NULL);
758 /*cache_inval_vp(vp, CINV_DESTROY);*/
759 error = 0;
761 out:
762 vrele(vp);
764 return error;
767 /* --------------------------------------------------------------------- */
769 static int
770 tmpfs_nlink(struct vop_nlink_args *v)
772 struct vnode *dvp = v->a_dvp;
773 struct vnode *vp = v->a_vp;
774 struct namecache *ncp = v->a_nch->ncp;
775 struct tmpfs_dirent *de;
776 struct tmpfs_node *node;
777 struct tmpfs_node *dnode;
778 int error;
780 KKASSERT(dvp != vp); /* XXX When can this be false? */
782 node = VP_TO_TMPFS_NODE(vp);
783 dnode = VP_TO_TMPFS_NODE(dvp);
785 /* XXX: Why aren't the following two tests done by the caller? */
787 /* Hard links of directories are forbidden. */
788 if (vp->v_type == VDIR) {
789 error = EPERM;
790 goto out;
793 /* Cannot create cross-device links. */
794 if (dvp->v_mount != vp->v_mount) {
795 error = EXDEV;
796 goto out;
799 /* Ensure that we do not overflow the maximum number of links imposed
800 * by the system. */
801 KKASSERT(node->tn_links <= LINK_MAX);
802 if (node->tn_links == LINK_MAX) {
803 error = EMLINK;
804 goto out;
807 /* We cannot create links of files marked immutable or append-only. */
808 if (node->tn_flags & (IMMUTABLE | APPEND)) {
809 error = EPERM;
810 goto out;
813 /* Allocate a new directory entry to represent the node. */
814 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
815 ncp->nc_name, ncp->nc_nlen, &de);
816 if (error != 0)
817 goto out;
819 /* Insert the new directory entry into the appropriate directory. */
820 tmpfs_dir_attach(dnode, de);
822 /* vp link count has changed, so update node times. */
824 TMPFS_NODE_LOCK(node);
825 node->tn_status |= TMPFS_NODE_CHANGED;
826 TMPFS_NODE_UNLOCK(node);
827 tmpfs_update(vp);
829 cache_setunresolved(v->a_nch);
830 cache_setvp(v->a_nch, vp);
831 error = 0;
833 out:
834 return error;
837 /* --------------------------------------------------------------------- */
839 static int
840 tmpfs_nrename(struct vop_nrename_args *v)
842 struct vnode *fdvp = v->a_fdvp;
843 struct namecache *fncp = v->a_fnch->ncp;
844 struct vnode *fvp = fncp->nc_vp;
845 struct vnode *tdvp = v->a_tdvp;
846 struct namecache *tncp = v->a_tnch->ncp;
847 struct vnode *tvp = tncp->nc_vp;
848 struct tmpfs_dirent *de;
849 struct tmpfs_mount *tmp;
850 struct tmpfs_node *fdnode;
851 struct tmpfs_node *fnode;
852 struct tmpfs_node *tnode;
853 struct tmpfs_node *tdnode;
854 char *newname;
855 int error;
857 tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
859 /* Disallow cross-device renames.
860 * XXX Why isn't this done by the caller? */
861 if (fvp->v_mount != tdvp->v_mount ||
862 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
863 error = EXDEV;
864 goto out;
867 tmp = VFS_TO_TMPFS(tdvp->v_mount);
868 tdnode = VP_TO_TMPFS_DIR(tdvp);
870 /* If source and target are the same file, there is nothing to do. */
871 if (fvp == tvp) {
872 error = 0;
873 goto out;
876 fdnode = VP_TO_TMPFS_DIR(fdvp);
877 fnode = VP_TO_TMPFS_NODE(fvp);
878 de = tmpfs_dir_lookup(fdnode, fnode, fncp);
880 /* Avoid manipulating '.' and '..' entries. */
881 if (de == NULL) {
882 error = ENOENT;
883 goto out_locked;
885 KKASSERT(de->td_node == fnode);
887 * If renaming a directory to another preexisting directory,
888 * ensure that the target directory is empty so that its
889 * removal causes no side effects.
890 * Kern_rename guarantees the destination to be a directory
891 * if the source is one. */
892 if (tvp != NULL) {
893 KKASSERT(tnode != NULL);
895 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
896 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
897 error = EPERM;
898 goto out_locked;
901 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
902 if (tnode->tn_size > 0) {
903 error = ENOTEMPTY;
904 goto out_locked;
906 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
907 error = ENOTDIR;
908 goto out_locked;
909 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
910 error = EISDIR;
911 goto out_locked;
912 } else {
913 KKASSERT(fnode->tn_type != VDIR &&
914 tnode->tn_type != VDIR);
918 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
919 || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
920 error = EPERM;
921 goto out_locked;
924 /* Ensure that we have enough memory to hold the new name, if it
925 * has to be changed. */
926 if (fncp->nc_nlen != tncp->nc_nlen ||
927 bcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen) != 0) {
928 newname = kmalloc(tncp->nc_nlen + 1, M_TMPFSNAME, M_WAITOK);
929 } else
930 newname = NULL;
932 /* If the node is being moved to another directory, we have to do
933 * the move. */
934 if (fdnode != tdnode) {
935 /* In case we are moving a directory, we have to adjust its
936 * parent to point to the new parent. */
937 if (de->td_node->tn_type == VDIR) {
938 struct tmpfs_node *n;
940 /* Ensure the target directory is not a child of the
941 * directory being moved. Otherwise, we'd end up
942 * with stale nodes. */
943 n = tdnode;
944 * TMPFS_LOCK guarantees that no nodes are freed while
945 * traversing the list. Nodes can only be marked as
946 * removed: tn_parent == NULL. */
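/*
 * Walk from the target directory toward the root (the root is assumed to
 * be its own parent, which terminates the loop).  Finding the source
 * directory on that path means the rename would create a cycle, so it is
 * rejected with EINVAL; a NULL parent along the way indicates a
 * concurrently removed directory and is rejected the same way.
 */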
947 TMPFS_LOCK(tmp);
948 TMPFS_NODE_LOCK(n);
949 while (n != n->tn_dir.tn_parent) {
950 struct tmpfs_node *parent;
952 if (n == fnode) {
953 TMPFS_NODE_UNLOCK(n);
954 TMPFS_UNLOCK(tmp);
955 error = EINVAL;
956 if (newname != NULL)
957 kfree(newname, M_TMPFSNAME);
958 goto out_locked;
960 parent = n->tn_dir.tn_parent;
961 if (parent == NULL) {
962 n = NULL;
963 break;
965 TMPFS_NODE_LOCK(parent);
966 if (parent->tn_dir.tn_parent == NULL) {
967 TMPFS_NODE_UNLOCK(parent);
968 n = NULL;
969 break;
971 n = parent;
973 TMPFS_NODE_UNLOCK(n);
974 TMPFS_UNLOCK(tmp);
975 if (n == NULL) {
976 error = EINVAL;
977 if (newname != NULL)
978 kfree(newname, M_TMPFSNAME);
979 goto out_locked;
982 /* Adjust the parent pointer. */
983 TMPFS_VALIDATE_DIR(fnode);
984 TMPFS_NODE_LOCK(de->td_node);
985 de->td_node->tn_dir.tn_parent = tdnode;
987 /* As a result of changing the target of the '..'
988 * entry, the link count of the source and target
989 * directories has to be adjusted. */
990 TMPFS_NODE_LOCK(tdnode);
991 TMPFS_ASSERT_LOCKED(tdnode);
992 TMPFS_NODE_LOCK(fdnode);
993 TMPFS_ASSERT_LOCKED(fdnode);
995 tdnode->tn_links++;
996 fdnode->tn_links--;
998 TMPFS_NODE_UNLOCK(fdnode);
999 TMPFS_NODE_UNLOCK(tdnode);
1000 TMPFS_NODE_UNLOCK(de->td_node);
1003 /* Do the move: just remove the entry from the source directory
1004 * and insert it into the target one. */
1005 tmpfs_dir_detach(fdnode, de);
1006 tmpfs_dir_attach(tdnode, de);
1009 /* If the name has changed, we need to make it effective by changing
1010 * it in the directory entry. */
1011 if (newname != NULL) {
1013 kfree(de->td_name, M_TMPFSNAME);
1014 de->td_namelen = (uint16_t)tncp->nc_nlen;
1015 bcopy(tncp->nc_name, newname, tncp->nc_nlen);
1016 newname[tncp->nc_nlen] = '\0';
1017 de->td_name = newname;
1019 TMPFS_NODE_LOCK(tdnode);
1020 TMPFS_NODE_LOCK(fdnode);
1022 fnode->tn_status |= TMPFS_NODE_CHANGED;
1023 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1025 TMPFS_NODE_UNLOCK(fdnode);
1026 TMPFS_NODE_UNLOCK(tdnode);
1029 /* If we are overwriting an entry, we have to remove the old one
1030 * from the target directory. */
1031 if (tvp != NULL) {
1032 /* Remove the old entry from the target directory. */
1033 de = tmpfs_dir_lookup(tdnode, tnode, tncp);
1034 tmpfs_dir_detach(tdnode, de);
1036 /* Free the directory entry we just deleted. Note that the
1037 * node referred by it will not be removed until the vnode is
1038 * really reclaimed. */
1039 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), de);
1040 /*cache_inval_vp(tvp, CINV_DESTROY);*/
1043 cache_rename(v->a_fnch, v->a_tnch);
1044 error = 0;
1046 out_locked:
1049 out:
1050 /* Release target nodes. */
1051 /* XXX: I don't understand when tdvp can be the same as tvp, but
1052 * other code takes care of this... */
1053 if (tdvp == tvp)
1054 vrele(tdvp);
1056 return error;
1059 /* --------------------------------------------------------------------- */
1061 static int
1062 tmpfs_nmkdir(struct vop_nmkdir_args *v)
1064 struct vnode *dvp = v->a_dvp;
1065 struct vnode **vpp = v->a_vpp;
1066 struct namecache *ncp = v->a_nch->ncp;
1067 struct vattr *vap = v->a_vap;
1068 struct ucred *cred = v->a_cred;
1069 int error;
1071 KKASSERT(vap->va_type == VDIR);
1073 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
1074 if (error == 0) {
1075 cache_setunresolved(v->a_nch);
1076 cache_setvp(v->a_nch, *vpp);
1079 return error;
1082 /* --------------------------------------------------------------------- */
1084 static int
1085 tmpfs_nrmdir(struct vop_nrmdir_args *v)
1087 struct vnode *dvp = v->a_dvp;
1088 struct namecache *ncp = v->a_nch->ncp;
1089 struct vnode *vp;
1091 int error;
1092 struct tmpfs_dirent *de;
1093 struct tmpfs_mount *tmp;
1094 struct tmpfs_node *dnode;
1095 struct tmpfs_node *node;
1098 * We have to acquire the vp from v->a_nch because
1099 * we will likely unresolve the namecache entry, and
1100 * a vrele is needed to trigger the tmpfs_inactive/tmpfs_reclaim
1101 * sequence.
1103 error = cache_vref(v->a_nch, v->a_cred, &vp);
1104 KKASSERT(error == 0);
1106 tmp = VFS_TO_TMPFS(dvp->v_mount);
1107 dnode = VP_TO_TMPFS_DIR(dvp);
1108 node = VP_TO_TMPFS_DIR(vp);
1110 /* Directories with more than two entries ('.' and '..') cannot be
1111 * removed. */
1112 if (node->tn_size > 0) {
1113 error = ENOTEMPTY;
1114 goto out;
1117 if ((dnode->tn_flags & APPEND)
1118 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1119 error = EPERM;
1120 goto out;
1123 /* This invariant holds only if we are not trying to remove "..".
1124 * We checked for that above so this is safe now. */
1125 KKASSERT(node->tn_dir.tn_parent == dnode);
1127 /* Get the directory entry associated with node (vp). This was
1128 * filled by tmpfs_lookup while looking up the entry. */
1129 de = tmpfs_dir_lookup(dnode, node, ncp);
1130 KKASSERT(TMPFS_DIRENT_MATCHES(de,
1131 ncp->nc_name,
1132 ncp->nc_nlen));
1134 /* Check flags to see if we are allowed to remove the directory. */
1135 if (dnode->tn_flags & APPEND
1136 || node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
1137 error = EPERM;
1138 goto out;
1142 /* Detach the directory entry from the directory (dnode). */
1143 tmpfs_dir_detach(dnode, de);
1145 /* No vnode should be allocated for this entry from this point */
1146 TMPFS_NODE_LOCK(node);
1147 TMPFS_ASSERT_ELOCKED(node);
1148 TMPFS_NODE_LOCK(dnode);
1149 TMPFS_ASSERT_ELOCKED(dnode);
1151 #if 0
1152 /* handled by tmpfs_free_node */
1153 KKASSERT(node->tn_links > 0);
1154 node->tn_links--;
1155 node->tn_dir.tn_parent = NULL;
1156 #endif
1157 node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
1158 TMPFS_NODE_MODIFIED;
1160 #if 0
1161 /* handled by tmpfs_free_node */
1162 KKASSERT(dnode->tn_links > 0);
1163 dnode->tn_links--;
1164 #endif
1165 dnode->tn_status |= TMPFS_NODE_ACCESSED | \
1166 TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1168 TMPFS_NODE_UNLOCK(dnode);
1169 TMPFS_NODE_UNLOCK(node);
1171 /* Free the directory entry we just deleted. Note that the node
1172 * referred by it will not be removed until the vnode is really
1173 * reclaimed. */
1174 tmpfs_free_dirent(tmp, de);
1176 /* Release the deleted vnode (will destroy the node, notify
1177 * interested parties and clean it from the cache). */
1179 TMPFS_NODE_LOCK(dnode);
1180 dnode->tn_status |= TMPFS_NODE_CHANGED;
1181 TMPFS_NODE_UNLOCK(dnode);
1182 tmpfs_update(dvp);
1184 cache_setunresolved(v->a_nch);
1185 cache_setvp(v->a_nch, NULL);
1186 /*cache_inval_vp(vp, CINV_DESTROY);*/
1187 error = 0;
1189 out:
1190 vrele(vp);
1192 return error;
1195 /* --------------------------------------------------------------------- */
1197 static int
1198 tmpfs_nsymlink(struct vop_nsymlink_args *v)
1200 struct vnode *dvp = v->a_dvp;
1201 struct vnode **vpp = v->a_vpp;
1202 struct namecache *ncp = v->a_nch->ncp;
1203 struct vattr *vap = v->a_vap;
1204 struct ucred *cred = v->a_cred;
1205 char *target = v->a_target;
1206 int error;
1208 vap->va_type = VLNK;
1209 error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
1210 if (error == 0) {
1211 cache_setunresolved(v->a_nch);
1212 cache_setvp(v->a_nch, *vpp);
1215 return error;
1218 /* --------------------------------------------------------------------- */
1220 static int
1221 tmpfs_readdir(struct vop_readdir_args *v)
1223 struct vnode *vp = v->a_vp;
1224 struct uio *uio = v->a_uio;
1225 int *eofflag = v->a_eofflag;
1226 off_t **cookies = v->a_cookies;
1227 int *ncookies = v->a_ncookies;
1228 struct tmpfs_mount *tmp;
1229 int error;
1230 off_t startoff;
1231 off_t cnt = 0;
1232 struct tmpfs_node *node;
1234 /* This operation only makes sense on directory nodes. */
1235 if (vp->v_type != VDIR)
1236 return ENOTDIR;
1238 tmp = VFS_TO_TMPFS(vp->v_mount);
1239 node = VP_TO_TMPFS_DIR(vp);
1240 startoff = uio->uio_offset;
1242 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
1243 error = tmpfs_dir_getdotdent(node, uio);
1244 if (error != 0)
1245 goto outok;
1246 cnt++;
1249 if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
1250 error = tmpfs_dir_getdotdotdent(tmp, node, uio);
1251 if (error != 0)
1252 goto outok;
1253 cnt++;
1256 error = tmpfs_dir_getdents(node, uio, &cnt);
1258 outok:
1259 KKASSERT(error >= -1);
1261 if (error == -1)
1262 error = 0;
1264 if (eofflag != NULL)
1265 *eofflag =
1266 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1268 /* Update NFS-related variables. */
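/*
 * Regenerate directory cookies for the entries just returned: starting
 * from the saved start offset, step through '.', '..' and the entry
 * list, recording for each returned entry the offset at which the next
 * readdir should resume.
 */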
1269 if (error == 0 && cookies != NULL && ncookies != NULL) {
1270 off_t i;
1271 off_t off = startoff;
1272 struct tmpfs_dirent *de = NULL;
1274 *ncookies = cnt;
1275 *cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
1277 for (i = 0; i < cnt; i++) {
1278 KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
1279 if (off == TMPFS_DIRCOOKIE_DOT) {
1280 off = TMPFS_DIRCOOKIE_DOTDOT;
1281 } else {
1282 if (off == TMPFS_DIRCOOKIE_DOTDOT) {
1283 de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
1284 } else if (de != NULL) {
1285 de = TAILQ_NEXT(de, td_entries);
1286 } else {
1287 de = tmpfs_dir_lookupbycookie(node,
1288 off);
1289 KKASSERT(de != NULL);
1290 de = TAILQ_NEXT(de, td_entries);
1292 if (de == NULL)
1293 off = TMPFS_DIRCOOKIE_EOF;
1294 else
1295 off = tmpfs_dircookie(de);
1298 (*cookies)[i] = off;
1300 KKASSERT(uio->uio_offset == off);
1303 return error;
1306 /* --------------------------------------------------------------------- */
1308 static int
1309 tmpfs_readlink(struct vop_readlink_args *v)
1311 struct vnode *vp = v->a_vp;
1312 struct uio *uio = v->a_uio;
1314 int error;
1315 struct tmpfs_node *node;
1317 KKASSERT(uio->uio_offset == 0);
1318 KKASSERT(vp->v_type == VLNK);
1320 node = VP_TO_TMPFS_NODE(vp);
1322 error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
1323 uio);
1324 TMPFS_NODE_LOCK(node);
1325 node->tn_status |= TMPFS_NODE_ACCESSED;
1326 TMPFS_NODE_UNLOCK(node);
1328 return error;
1331 /* --------------------------------------------------------------------- */
1333 static int
1334 tmpfs_inactive(struct vop_inactive_args *v)
1336 struct vnode *vp = v->a_vp;
1338 struct tmpfs_node *node;
1340 node = VP_TO_TMPFS_NODE(vp);
1343 * Get rid of unreferenced deleted vnodes sooner rather than
1344 * later so the data memory can be recovered immediately.
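 *
 * This is only attempted when the node has no links left and no other
 * thread is in the middle of allocating a vnode for it
 * (TMPFS_VNODE_ALLOCATING); setting TMPFS_VNODE_DOOMED presumably signals
 * such code that the node is being torn down before vrecycle() is called.
 */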
1346 TMPFS_NODE_LOCK(node);
1347 if (node->tn_links == 0 &&
1348 (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1349 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1350 TMPFS_NODE_UNLOCK(node);
1351 vrecycle(vp);
1352 } else {
1353 TMPFS_NODE_UNLOCK(node);
1356 return 0;
1359 /* --------------------------------------------------------------------- */
1361 static int
1362 tmpfs_reclaim(struct vop_reclaim_args *v)
1364 struct vnode *vp = v->a_vp;
1365 struct tmpfs_mount *tmp;
1366 struct tmpfs_node *node;
1368 node = VP_TO_TMPFS_NODE(vp);
1369 tmp = VFS_TO_TMPFS(vp->v_mount);
1371 tmpfs_free_vp(vp);
1373 /* If the node referenced by this vnode was deleted by the user,
1374 * we must free its associated data structures (now that the vnode
1375 * is being reclaimed). */
1376 TMPFS_NODE_LOCK(node);
1377 if (node->tn_links == 0 &&
1378 (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1379 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1380 tmpfs_free_node(tmp, node);
1381 /* eats the lock */
1382 } else {
1383 TMPFS_NODE_UNLOCK(node);
1386 KKASSERT(vp->v_data == NULL);
1387 return 0;
1390 /* --------------------------------------------------------------------- */
1392 static int
1393 tmpfs_print(struct vop_print_args *v)
1395 struct vnode *vp = v->a_vp;
1397 struct tmpfs_node *node;
1399 node = VP_TO_TMPFS_NODE(vp);
1401 kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
1402 node, node->tn_flags, node->tn_links);
1403 kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
1404 node->tn_mode, node->tn_uid, node->tn_gid,
1405 (uintmax_t)node->tn_size, node->tn_status);
1407 if (vp->v_type == VFIFO)
1408 fifo_printinfo(vp);
1410 kprintf("\n");
1412 return 0;
1415 /* --------------------------------------------------------------------- */
1417 static int
1418 tmpfs_pathconf(struct vop_pathconf_args *v)
1420 int name = v->a_name;
1421 register_t *retval = v->a_retval;
1423 int error;
1425 error = 0;
1427 switch (name) {
1428 case _PC_LINK_MAX:
1429 *retval = LINK_MAX;
1430 break;
1432 case _PC_NAME_MAX:
1433 *retval = NAME_MAX;
1434 break;
1436 case _PC_PATH_MAX:
1437 *retval = PATH_MAX;
1438 break;
1440 case _PC_PIPE_BUF:
1441 *retval = PIPE_BUF;
1442 break;
1444 case _PC_CHOWN_RESTRICTED:
1445 *retval = 1;
1446 break;
1448 case _PC_NO_TRUNC:
1449 *retval = 1;
1450 break;
1452 case _PC_SYNC_IO:
1453 *retval = 1;
1454 break;
1456 case _PC_FILESIZEBITS:
1457 *retval = 0; /* XXX Don't know which value I should return. */
1458 break;
1460 default:
1461 error = EINVAL;
1464 return error;
1467 /* --------------------------------------------------------------------- */
1470 * vnode operations vector used for files stored in a tmpfs file system.
1472 struct vop_ops tmpfs_vnode_vops = {
1473 .vop_default = vop_defaultop,
1474 .vop_getpages = vop_stdgetpages,
1475 .vop_putpages = vop_stdputpages,
1476 .vop_ncreate = tmpfs_ncreate,
1477 .vop_nresolve = tmpfs_nresolve,
1478 .vop_nlookupdotdot = tmpfs_nlookupdotdot,
1479 .vop_nmknod = tmpfs_nmknod,
1480 .vop_open = tmpfs_open,
1481 .vop_close = tmpfs_close,
1482 .vop_access = tmpfs_access,
1483 .vop_getattr = tmpfs_getattr,
1484 .vop_setattr = tmpfs_setattr,
1485 .vop_read = tmpfs_read,
1486 .vop_write = tmpfs_write,
1487 .vop_fsync = tmpfs_fsync,
1488 .vop_nremove = tmpfs_nremove,
1489 .vop_nlink = tmpfs_nlink,
1490 .vop_nrename = tmpfs_nrename,
1491 .vop_nmkdir = tmpfs_nmkdir,
1492 .vop_nrmdir = tmpfs_nrmdir,
1493 .vop_nsymlink = tmpfs_nsymlink,
1494 .vop_readdir = tmpfs_readdir,
1495 .vop_readlink = tmpfs_readlink,
1496 .vop_inactive = tmpfs_inactive,
1497 .vop_reclaim = tmpfs_reclaim,
1498 .vop_print = tmpfs_print,
1499 .vop_pathconf = tmpfs_pathconf,
1500 .vop_bmap = tmpfs_bmap,
1502 .vop_strategy = tmpfs_strategy,
1503 .vop_advlock = tmpfs_advlock,