/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

#ifdef SMP
static int read_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, read_mpsafe, CTLFLAG_RW, &read_mpsafe, 0, "");
static int write_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, write_mpsafe, CTLFLAG_RW, &write_mpsafe, 0, "");
static int getattr_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, getattr_mpsafe, CTLFLAG_RW, &getattr_mpsafe, 0, "");
#else
#define	read_mpsafe	0
#define	write_mpsafe	0
#define	getattr_mpsafe	0
#endif

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
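
/*
 * A minimal usage sketch of the nlookup/vn_open pairing described above:
 * obtain a referenced vnode by path with fp == NULL.  The helper name
 * example_open_for_read() is hypothetical; real callers such as the
 * open(2) path supply a file pointer instead.
 */
#if 0
static int
example_open_for_read(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* fp == NULL, so the referenced vnode comes back in nd */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);	/* required whether or not vn_open failed */
	return (error);
}
#endif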

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
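
/*
 * Worked example for the arithmetic above, assuming the historical 16KB
 * BKVASIZE: a 64KB transfer continuing at f_nextoff bumps f_seqcount by
 * (65536 + 16383) / 16384 = 4, clamped to IO_SEQMAX, and the caller then
 * merges f_seqcount << IO_SEQSHIFT into the ioflags handed to VOP_READ
 * or VOP_WRITE.
 */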

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__amd64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
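
/*
 * The pairing the routines above are meant for, roughly as vn_read()
 * and vn_write() below use them:
 */
#if 0
	uio->uio_offset = vn_get_fpf_offset(fp);   /* locks f_offset */
	/* ... VOP_READ/VOP_WRITE advances uio->uio_offset ... */
	vn_set_fpf_offset(fp, uio->uio_offset);    /* stores and unlocks */
#endif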

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
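
/*
 * Illustrative sketch of a simple vn_rdwr() call: read the first 512
 * bytes of a referenced, unlocked vnode into a kernel buffer.  The
 * buffer and resid names are hypothetical.
 */
#if 0
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
			UIO_SYSSPACE, 0, proc0.p_ucred, &resid);
	/* with aresid non-NULL, resid is the byte count NOT transferred */
#endif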

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
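
/*
 * Worked example of the chunking arithmetic above, assuming the usual
 * 64KB MAXBSIZE: a write of 200000 bytes at offset 3000 is issued as a
 * first chunk of 65536 - 3000 = 62536 bytes (aligning to the next
 * MAXBSIZE boundary), then 65536, 65536, and finally 6392 bytes.
 */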

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	if (read_mpsafe && (vp->v_flag & VMP_READ)) {
		error = VOP_READ(vp, uio, ioflag, cred);
	} else {
		get_mplock();
		error = VOP_READ(vp, uio, ioflag, cred);
		rel_mplock();
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	if (write_mpsafe && (vp->v_flag & VMP_WRITE)) {
		error = VOP_WRITE(vp, uio, ioflag, cred);
	} else {
		get_mplock();
		error = VOP_WRITE(vp, uio, ioflag, cred);
		rel_mplock();
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE (if vnode has VMP_GETATTR)
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	if (getattr_mpsafe && (vp->v_flag & VMP_GETATTR)) {
		error = VOP_GETATTR(vp, vap);
	} else {
		get_mplock();
		error = VOP_GETATTR(vp, vap);
		rel_mplock();
	}
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
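
/*
 * Typical vn_lock()/vn_unlock() usage sketch.  Even with LK_RETRY the
 * lock can be granted on a vnode that was reclaimed while we slept, in
 * which case vn_lock() releases it and returns ENOENT, so the return
 * value still needs checking:
 */
#if 0
	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
		/* ... operate on the locked, validated vnode ... */
		vn_unlock(vp);
	}
#endif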

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}