kernel - Fix excessive call stack depth on stuck interrupt
[dragonfly.git] / sys / kern / vfs_vnops.c
blob 4ea4fd6257516967f49b8cecb0a5bd55679f61fe
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		if (nd->nl_flags & NLC_SHAREDLOCK) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					if (nd->nl_flags & NLC_SHAREDLOCK) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}
	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 *	 disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}
	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
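
/*
 * Editorial sketch (not part of the original file): a typical vn_open()
 * call sequence, following the nlookup_init()/nlookup_done() contract
 * described above.  The path, flags, and cleanup ordering are
 * illustrative assumptions, so the block is compiled out.
 */
#if 0
static int
example_open_vnode(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* referenced, unlocked since NLC_LOCKVP was not set */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif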
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}
/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}
/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}
/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
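
/*
 * Editorial note: assuming MAXBSIZE is 64KB, three back-to-back 64KB
 * reads starting at offset 0 raise f_seqcount to 1, 2 and 3 (each
 * call's resid rounds up to one MAXBSIZE unit), so the third call
 * returns a read-ahead hint of 3 << IO_SEQSHIFT, saturating at
 * IO_SEQMAX << IO_SEQSHIFT.
 */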
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
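
/*
 * Editorial note on the protocol above: FOFFSETLOCK is claimed with a
 * compare-and-set on f_flag.  A contender sets FOFFSETWAKE and uses the
 * tsleep_interlock()/tsleep() pair so it cannot miss a wakeup issued
 * between observing the lock held and going to sleep.  The release path
 * clears both bits in one atomic op and only calls wakeup() when
 * FOFFSETWAKE was set, keeping the uncontended case to a single
 * atomic_cmpset_int().
 */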
/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
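
/*
 * Editorial sketch (not part of the original file): read the first
 * 512 bytes of a vnode into a kernel buffer, letting vn_rdwr() take
 * the vnode lock itself.  Buffer and credential choices are
 * illustrative assumptions.
 */
#if 0
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0,
			UIO_SYSSPACE, 0, proc0.p_ucred, &resid);
	/* on success, sizeof(buf) - resid bytes were actually read */
#endif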
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
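
/*
 * Editorial note: the chunk computation above aligns all but the first
 * chunk.  Assuming MAXBSIZE is 65536, a transfer starting at offset
 * 100000 gets a first chunk of 65536 - (100000 % 65536) = 31072 bytes,
 * advancing the offset to 131072; later chunks are then full MAXBSIZE
 * blocks except possibly the last.
 */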
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}
/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Adjust `mode' here; sb->st_mode is not assigned until
		 * after the switch.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastread);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastwrite);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}
/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}
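
/*
 * Editorial sketch (not part of the original file): lock a vnode
 * exclusively, treating a reclaimed vnode as an error instead of
 * silently operating on it.
 */
#if 0
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		/* vnode is locked and not reclaimed */
		vn_unlock(vp);
	} else {
		/* ENOENT: the vnode went VRECLAIMED */
	}
#endif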
#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif
/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}
/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
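
/*
 * Editorial sketch (not part of the original file): the unlock/relock
 * pair lets a caller drop an exclusive vnode lock around a blocking
 * operation and transparently restore it afterwards.
 */
#if 0
	int vpls;

	vpls = vn_islocked_unlock(vp);
	/* ... blocking operation that must not hold the vnode lock ... */
	vn_islocked_relock(vp, vpls);
#endif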
/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}
/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}