/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
                struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);

struct fileops vnode_fileops = {
        .fo_read = vn_read,
        .fo_write = vn_write,
        .fo_ioctl = vn_ioctl,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_shutdown = nofo_shutdown
};
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
{
        struct file *fp = fpp ? *fpp : NULL;
        struct vnode *vp;
        struct ucred *cred = nd->nl_cred;
        struct vattr vat;
        struct vattr *vap = &vat;
        int error;
        int vpexcl;
        u_int flags;
        uint64_t osize;
        struct mount *mp;

        /*
         * Certain combinations are illegal
         */
        if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
                return(EACCES);

        /*
         * Lookup the path and create or obtain the vnode.  After a
         * successful lookup a locked nd->nl_nch will be returned.
         *
         * The result of this section should be a locked vnode.
         *
         * XXX with only a little work we should be able to avoid locking
         * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
         */
        nd->nl_flags |= NLC_OPEN;
        if (fmode & O_APPEND)
                nd->nl_flags |= NLC_APPEND;
        if (fmode & O_TRUNC)
                nd->nl_flags |= NLC_TRUNCATE;
        if (fmode & FREAD)
                nd->nl_flags |= NLC_READ;
        if (fmode & FWRITE)
                nd->nl_flags |= NLC_WRITE;
        if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
                nd->nl_flags |= NLC_FOLLOW;
        if (fmode & O_CREAT) {
                /*
                 * CONDITIONAL CREATE FILE CASE
                 *
                 * Setting NLC_CREATE causes a negative hit to store
                 * the negative hit ncp and not return an error.  Then
                 * nc_error or nc_vp may be checked to see if the ncp
                 * represents a negative hit.  NLC_CREATE also requires
                 * write permission on the governing directory or EPERM
                 * is returned.
                 *
                 * If the file exists but is missing write permission,
                 * nlookup() returns EACCES.  This has to be handled specially
                 * when combined with O_EXCL.
                 */
                nd->nl_flags |= NLC_CREATE;
                nd->nl_flags |= NLC_REFDVP;
                bwillinode(1);
                error = nlookup(nd);
                if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
                    (fmode & O_EXCL) && !nd->nl_dir_error)
                {
                        error = EEXIST;
                }

                /*
                 * If no error and nd->nl_dvp is NULL, the nlookup represents
                 * a mount-point or cross-mount situation.  e.g.
                 * open("/var/cache", O_CREAT), where /var/cache is a
                 * mount point or a null-mount point.
                 */
                if (error == 0 && nd->nl_dvp == NULL)
                        error = EINVAL;
        } else {
                /*
                 * NORMAL OPEN FILE CASE
                 */
                error = nlookup(nd);
        }

        if (error)
                return (error);
        /*
         * split case to allow us to re-resolve and retry the ncp in case
         * we get ESTALE.
         *
         * (error is 0 on entry / retry)
         */
again:
        /*
         * Checks for (likely) filesystem-modifying cases and allows
         * the filesystem to stall the front-end.
         */
        if ((fmode & (FWRITE | O_TRUNC)) ||
            ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
                error = ncp_writechk(&nd->nl_nch);
                if (error)
                        return error;
        }

        vpexcl = 1;
        if (fmode & O_CREAT) {
                if (nd->nl_nch.ncp->nc_vp == NULL) {
                        VATTR_NULL(vap);
                        vap->va_type = VREG;
                        vap->va_mode = cmode;
                        vap->va_fuseflags = fmode; /* FUSE */
                        if (fmode & O_EXCL)
                                vap->va_vaflags |= VA_EXCLUSIVE;
                        error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
                                            nd->nl_cred, vap);
                        if (error)
                                return (error);
                        fmode &= ~O_TRUNC;
                        /* locked vnode is returned */
                } else {
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                        } else {
                                error = cache_vget(&nd->nl_nch, cred,
                                                   LK_EXCLUSIVE, &vp);
                        }
                        if (error)
                                return (error);
                        fmode &= ~O_CREAT;
                }
        } else {
                /*
                 * In most other cases a shared lock on the vnode is
                 * sufficient.  However, the O_RDWR case needs an
                 * exclusive lock if the vnode is executable.  The
                 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
                 * this.
                 *
                 * NOTE: If NCF_NOTX is not set, we do not know the
                 *	 state of the 'x' bits and have to get
                 *	 an exclusive lock for the EXCLLOCK_IFEXEC case.
                 */
                if ((nd->nl_flags & NLC_SHAREDLOCK) &&
                    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
                     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
                        error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
                        vpexcl = 0;
                } else {
                        error = cache_vget(&nd->nl_nch, cred,
                                           LK_EXCLUSIVE, &vp);
                }
                if (error)
                        return (error);
        }
        /*
         * We have a locked vnode and ncp now.  Note that the ncp will
         * be cleaned up by the caller if nd->nl_nch is left intact.
         */
        if (vp->v_type == VLNK) {
                error = EMLINK;
                goto bad;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
                error = ENOTDIR;
                goto bad;
        }
        if ((fmode & O_CREAT) == 0) {
                if (fmode & (FWRITE | O_TRUNC)) {
                        if (vp->v_type == VDIR) {
                                error = EISDIR;
                                goto bad;
                        }

                        /*
                         * Additional checks on vnode (does not substitute
                         * for ncp_writechk()).
                         */
                        error = vn_writechk(vp);
                        if (error) {
                                /*
                                 * Special stale handling, re-resolve the
                                 * vnode.
                                 */
                                if (error == ESTALE) {
                                        u_int dummy_gen = 0;

                                        vput(vp);
                                        vp = NULL;
                                        if (vpexcl == 0) {
                                                cache_unlock(&nd->nl_nch);
                                                cache_lock(&nd->nl_nch);
                                        }
                                        cache_setunresolved(&nd->nl_nch);
                                        error = cache_resolve(&nd->nl_nch,
                                                              &dummy_gen,
                                                              cred);
                                        if (error == 0)
                                                goto again;
                                }
                                goto bad;
                        }
                }
        }
        if (fmode & O_TRUNC) {
                vn_unlock(vp);                          /* XXX */
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* XXX */
                osize = vp->v_filesize;
                VATTR_NULL(vap);
                vap->va_size = 0;
                error = VOP_SETATTR_FP(vp, vap, cred, fp);
                if (error)
                        goto bad;
                error = VOP_GETATTR(vp, vap);
                if (error)
                        goto bad;
                mp = vq_vptomp(vp);
                VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
        }
        /*
         * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
         * These particular bits are tracked all the way from the root.
         *
         * NOTE: Might not work properly on NFS servers due to the
         *	 disconnected namecache.
         */
        flags = nd->nl_nch.ncp->nc_flag;
        if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
            (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
                vsetflags(vp, VSWAPCACHE);
        } else {
                vclrflags(vp, VSWAPCACHE);
        }

        /*
         * Setup the fp so VOP_OPEN can override it.  No descriptor has been
         * associated with the fp yet so we own it clean.
         *
         * f_nchandle inherits nl_nch.  This used to be necessary only for
         * directories but now we do it unconditionally so f*() ops
         * such as fchmod() can access the actual namespace that was
         * used to open the file.
         */
        if (fp) {
                if (nd->nl_flags & NLC_APPENDONLY)
                        fmode |= FAPPENDONLY;
                fp->f_nchandle = nd->nl_nch;
                cache_zero(&nd->nl_nch);
                cache_unlock(&fp->f_nchandle);
        }
        /*
         * Get rid of nl_nch.  vn_open does not return it (it returns the
         * vnode or the file pointer).
         *
         * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
         *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
         *
         * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
         *	 (it will fdrop/fhold), and can also set the *fpp up however
         *	 it wants, not necessarily using DTYPE_VNODE.
         */
        if (nd->nl_nch.ncp)
                cache_put(&nd->nl_nch);

        error = VOP_OPEN(vp, fmode, cred, fpp);
        fp = fpp ? *fpp : NULL;

        if (error) {
                /*
                 * setting f_ops to &badfileops will prevent the descriptor
                 * code from trying to close and release the vnode, since
                 * the open failed we do not want to call close.
                 */
                if (fp) {
                        fp->f_data = NULL;
                        fp->f_ops = &badfileops;
                }
                goto bad;
        }

#if 0
        /*
         * Assert that VREG files have been setup for vmio.
         */
        KASSERT(vp->v_type != VREG || vp->v_object != NULL,
                ("vn_open: regular file was not VMIO enabled!"));
#endif

        /*
         * Return the vnode.  XXX needs some cleaning up.  The vnode is
         * only returned in the fp == NULL case.
         *
         * NOTE: vnode stored in fp may be different
         */
        if (fp == NULL) {
                nd->nl_open_vp = vp;
                nd->nl_vp_fmode = fmode;
                if ((nd->nl_flags & NLC_LOCKVP) == 0)
                        vn_unlock(vp);
        } else {
                vput(vp);
        }
        return (0);
bad:
        if (vp)
                vput(vp);
        return (error);
}
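
/*
 * Usage sketch (hypothetical, for illustration only; the lookup flags
 * and fmode depend entirely on the caller).  A typical in-kernel open
 * pairs nlookup_init() and nlookup_done() around vn_open():
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	(on success nd.nl_open_vp holds the referenced, locked vnode)
 *	nlookup_done(&nd);
 */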
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        if (strncmp(devname, "/dev/", 5) == 0)
                devname += 5;
        if ((vp = getsynthvnode(devname)) == NULL) {
                error = ENODEV;
        } else {
                error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
                vn_unlock(vp);
                if (error) {
                        vrele(vp);
                        vp = NULL;
                }
        }
        *vpp = vp;
        return (error);
}
/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
        /*
         * If there's shared text associated with
         * the vnode, try to free it up once.  If
         * we fail, we can't allow writing.
         */
        if (vp->v_flag & VTEXT)
                return (ETXTBSY);
        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
                return (EROFS);
        return 0;
}
/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
        struct mount *mp;

        if ((mp = nch->mount) != NULL) {
                if (mp->mnt_flag & MNT_RDONLY)
                        return (EROFS);
                if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
                        VFS_MODIFYING(mp);
        }
        return(0);
}
/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
        int error;

        error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
        if (error == 0) {
                error = VOP_CLOSE(vp, flags, fp);
                vn_unlock(vp);
        }
        vrele(vp);
        return (error);
}
/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
        /*
         * Sequential heuristic - detect sequential operation
         *
         * NOTE: SMP: We allow f_seqcount updates to race.
         */
        if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
            uio->uio_offset == fp->f_nextoff) {
                int tmpseq = fp->f_seqcount;

                tmpseq += howmany(uio->uio_resid, MAXBSIZE);
                if (tmpseq > IO_SEQMAX)
                        tmpseq = IO_SEQMAX;
                fp->f_seqcount = tmpseq;
                return(fp->f_seqcount << IO_SEQSHIFT);
        }

        /*
         * Not sequential, quick draw-down of seqcount
         *
         * NOTE: SMP: We allow f_seqcount updates to race.
         */
        if (fp->f_seqcount > 1)
                fp->f_seqcount = 1;
        else
                fp->f_seqcount = 0;
        return(0);
}
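
/*
 * The value returned above is OR'd into ioflag by vn_read()/vn_write()
 * below; by convention a filesystem can recover the hint with something
 * like 'seqcount = ioflag >> IO_SEQSHIFT' (a sketch of the usual BSD
 * convention, not something this file enforces).
 */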
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
        u_int   flags;
        u_int   nflags;

        /*
         * Shortcut critical path.
         */
        flags = fp->f_flag & ~FOFFSETLOCK;
        if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
                return(fp->f_offset);

        /*
         * The hard way
         */
        for (;;) {
                flags = fp->f_flag;
                if (flags & FOFFSETLOCK) {
                        nflags = flags | FOFFSETWAKE;
                        tsleep_interlock(&fp->f_flag, 0);
                        if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
                                tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
                } else {
                        nflags = flags | FOFFSETLOCK;
                        if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
                                break;
                }
        }
        return(fp->f_offset);
}
/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
        u_int   flags;
        u_int   nflags;

        /*
         * We hold the lock so we can set the offset without interference.
         */
        fp->f_offset = offset;

        /*
         * Normal release is already a reasonably critical path.
         */
        for (;;) {
                flags = fp->f_flag;
                nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
                if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
                        if (flags & FOFFSETWAKE)
                                wakeup(&fp->f_flag);
                        break;
                }
        }
}
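
/*
 * The get/set pair above brackets an I/O so that concurrent read()s and
 * write()s on the same fp see a consistent f_offset.  Sketch of the
 * pattern used by vn_read()/vn_write() below:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);
 *	... issue VOP_READ/VOP_WRITE ...
 *	vn_set_fpf_offset(fp, uio->uio_offset);
 */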
/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
        return(fp->f_offset);
#else
        off_t off = vn_get_fpf_offset(fp);
        vn_set_fpf_offset(fp, off);
        return(off);
#endif
}
/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
        off_t offset, enum uio_seg segflg, int ioflg,
        struct ucred *cred, int *aresid)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_segflg = segflg;
        auio.uio_rw = rw;
        auio.uio_td = curthread;
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0)
                vn_unlock(vp);
        return (error);
}
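
/*
 * Usage sketch (hypothetical): read the first 512 bytes of a referenced
 * vnode into a kernel buffer, letting vn_rdwr() handle the vnode lock:
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *			0, cred, &resid);
 *
 * On success (error == 0), sizeof(buf) - resid bytes were transferred.
 */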
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
                 off_t offset, enum uio_seg segflg, int ioflg,
                 struct ucred *cred, int *aresid)
{
        int error = 0;

        do {
                int chunk;

                /*
                 * Force `offset' to a multiple of MAXBSIZE except possibly
                 * for the first chunk, so that filesystems only need to
                 * write full blocks except possibly for the first and last
                 * chunks.
                 */
                chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

                if (chunk > len)
                        chunk = len;
                if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
                        switch(rw) {
                        case UIO_READ:
                                bwillread(chunk);
                                break;
                        case UIO_WRITE:
                                bwillwrite(chunk);
                                break;
                        }
                }
                error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
                                ioflg, cred, aresid);
                len -= chunk;   /* aresid calc already includes length */
                if (error)
                        break;
                offset += chunk;
                base += chunk;
                lwkt_user_yield();
        } while (len);
        if (aresid)
                *aresid += len;
        return (error);
}
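
/*
 * The call pattern is identical to vn_rdwr().  Callers moving large
 * amounts of data (core dumps being the classic example) prefer this
 * variant so the buffer cache is not saturated while the vnode is held.
 */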
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not td %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;

        ioflag = 0;
        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
                uio->uio_offset = vn_get_fpf_offset(fp);
        vn_lock(vp, LK_SHARED | LK_RETRY);
        ioflag |= sequential_heuristic(uio, fp);

        error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
                vn_set_fpf_offset(fp, uio->uio_offset);
        return (error);
}
/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not p %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;

        ioflag = IO_UNIT;
        if (vp->v_type == VREG &&
           ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
                ioflag |= IO_APPEND;
        }

        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if (flags & O_FASYNCWRITE) {
                /* ioflag &= ~IO_SYNC; */
        } else if (flags & O_FSYNCWRITE) {
                ioflag |= IO_SYNC;
        } else if (fp->f_flag & O_FSYNC) {
                ioflag |= IO_SYNC;
        }

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
                ioflag |= IO_SYNC;
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = vn_get_fpf_offset(fp);
        if (vp->v_mount)
                VFS_MODIFYING(vp->v_mount);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        ioflag |= sequential_heuristic(uio, fp);
        error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        if ((flags & O_FOFFSET) == 0)
                vn_set_fpf_offset(fp, uio->uio_offset);
        return (error);
}
/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
        struct vnode *vp;
        int error;

        vp = (struct vnode *)fp->f_data;
        error = vn_stat(vp, sb, cred);
        return (error);
}
/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
        struct vattr vattr;
        struct vattr *vap;
        int error;
        u_short mode;
        cdev_t dev;

        /*
         * vp already has a ref and is validated, can call unlocked.
         */
        vap = &vattr;
        error = VOP_GETATTR(vp, vap);
        if (error)
                return (error);

        /*
         * Zero the spare stat fields
         */
        sb->st_lspare = 0;
        sb->st_qspare2 = 0;

        /*
         * Copy from vattr table
         */
        if (vap->va_fsid != VNOVAL)
                sb->st_dev = vap->va_fsid;
        else
                sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
        sb->st_ino = vap->va_fileid;
        mode = vap->va_mode;
        switch (vap->va_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDATABASE:
                mode |= S_IFDB;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                /* This is a cosmetic change, symlinks do not have a mode. */
                if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
                        sb->st_mode &= ~ACCESSPERMS;    /* 0000 */
                else
                        sb->st_mode |= ACCESSPERMS;     /* 0777 */
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        if (vap->va_nlink > (nlink_t)-1)
                sb->st_nlink = (nlink_t)-1;
        else
                sb->st_nlink = vap->va_nlink;
        sb->st_uid = vap->va_uid;
        sb->st_gid = vap->va_gid;
        sb->st_rdev = devid_from_dev(vp->v_rdev);
        sb->st_size = vap->va_size;
        sb->st_atimespec = vap->va_atime;
        sb->st_mtimespec = vap->va_mtime;
        sb->st_ctimespec = vap->va_ctime;

        /*
         * A VCHR and VBLK device may track the last access and last modified
         * time independently of the filesystem.  This is particularly true
         * because device read and write calls may bypass the filesystem.
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK) {
                dev = vp->v_rdev;
                if (dev != NULL) {
                        if (dev->si_lastread) {
                                sb->st_atimespec.tv_sec = time_second +
                                                          (dev->si_lastread -
                                                           time_uptime);
                                sb->st_atimespec.tv_nsec = 0;
                        }
                        if (dev->si_lastwrite) {
                                sb->st_mtimespec.tv_sec = time_second +
                                                          (dev->si_lastwrite -
                                                           time_uptime);
                                sb->st_mtimespec.tv_nsec = 0;
                        }
                }
        }
        /*
         * According to www.opengroup.org, the meaning of st_blksize is
         *   "a filesystem-specific preferred I/O block size for this
         *    object.  In some filesystem types, this may vary from file
         *    to file"
         * Default to PAGE_SIZE after much discussion.
         */
        if (vap->va_type == VREG) {
                sb->st_blksize = vap->va_blocksize;
        } else if (vn_isdisk(vp, NULL)) {
                /*
                 * XXX this is broken.  If the device is not yet open (aka
                 * stat() call, aka v_rdev == NULL), how are we supposed
                 * to get a valid block size out of it?
                 */
                dev = vp->v_rdev;

                sb->st_blksize = dev->si_bsize_best;
                if (sb->st_blksize < dev->si_bsize_phys)
                        sb->st_blksize = dev->si_bsize_phys;
                if (sb->st_blksize < BLKDEV_IOSIZE)
                        sb->st_blksize = BLKDEV_IOSIZE;
        } else {
                sb->st_blksize = PAGE_SIZE;
        }

        sb->st_flags = vap->va_flags;

        error = caps_priv_check(cred, SYSCAP_NOVFS_GENERATION);
        if (error)
                sb->st_gen = 0;
        else
                sb->st_gen = (u_int32_t)vap->va_gen;

        sb->st_blocks = vap->va_bytes / S_BLKSIZE;

        /*
         * This is for ABI compatibility <= 5.7 (for ABI change made in
         * 5.7 master).
         */
        sb->__old_st_blksize = sb->st_blksize;

        return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
         struct sysmsg *msg)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        struct vnode *ovp;
        struct vattr vattr;
        int error;
        off_t size;

        switch (vp->v_type) {
        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        error = VOP_GETATTR(vp, &vattr);
                        if (error)
                                break;
                        size = vattr.va_size;
                        if ((vp->v_flag & VNOTSEEKABLE) == 0)
                                size -= vn_poll_fpf_offset(fp);
                        if (size > 0x7FFFFFFF)
                                size = 0x7FFFFFFF;
                        *(int *)data = size;
                        error = 0;
                        break;
                }
                if (com == FIOASYNC) {                          /* XXX */
                        error = 0;                              /* XXX */
                        break;
                }
                /* fall into ... */
        default:
#if 0
                return (ENOTTY);
#endif
        case VFIFO:
        case VCHR:
        case VBLK:
                if (com == FIODTYPE) {
                        if (vp->v_type != VCHR && vp->v_type != VBLK) {
                                error = ENOTTY;
                                break;
                        }
                        *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
                        error = 0;
                        break;
                }
                error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
                if (error == 0 && com == TIOCSCTTY) {
                        struct proc *p = curthread->td_proc;
                        struct session *sess;

                        if (p == NULL) {
                                error = ENOTTY;
                                break;
                        }

                        get_mplock();
                        sess = p->p_session;
                        /* Do nothing if reassigning same control tty */
                        if (sess->s_ttyvp == vp) {
                                error = 0;
                                rel_mplock();
                                break;
                        }

                        /* Get rid of reference to old control tty */
                        ovp = sess->s_ttyvp;
                        vref(vp);
                        sess->s_ttyvp = vp;
                        if (ovp)
                                vrele(ovp);
                        rel_mplock();
                }
                break;
        }
        return (error);
}
/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
vn_lock(struct vnode *vp, int flags)
{
        int error;

        do {
                error = lockmgr(&vp->v_lock, flags);
                if (error == 0)
                        break;
        } while (flags & LK_RETRY);

        /*
         * Because we (had better!) have a ref on the vnode, once it
         * goes to VRECLAIMED state it will not be recycled until all
         * refs go away.  So we can just check the flag.
         */
        if (error == 0 && (vp->v_flag & VRECLAIMED)) {
                if (flags & LK_FAILRECLAIM) {
                        lockmgr(&vp->v_lock, LK_RELEASE);
                        error = ENOENT;
                }
        }
        return (error);
}
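
/*
 * Usage sketch (hypothetical): callers that must not operate on a
 * reclaimed vnode typically do:
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
 *	if (error == 0) {
 *		... operate on vp ...
 *		vn_unlock(vp);
 *	}
 *
 * With LK_RETRY and without LK_FAILRECLAIM the call always returns 0.
 */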
int
vn_relock(struct vnode *vp, int flags)
{
        int error;

        do {
                error = lockmgr(&vp->v_lock, flags);
                if (error == 0)
                        break;
        } while (flags & LK_RETRY);

        return error;
}
#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
        kprintf("vn_unlock from %s:%d\n", filename, line);
        lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif
/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
        return (lockstatus(&vp->v_lock, curthread));
}
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
        int vpls;

        vpls = lockstatus(&vp->v_lock, curthread);
        if (vpls == LK_EXCLUSIVE)
                lockmgr(&vp->v_lock, LK_RELEASE);
        return(vpls);
}
/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
        int error;

        if (vpls == LK_EXCLUSIVE)
                error = lockmgr(&vp->v_lock, vpls);
}
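
/*
 * Usage sketch (hypothetical): temporarily drop an exclusive vnode lock
 * around a blocking operation and restore it afterwards:
 *
 *	int vpls = vn_islocked_unlock(vp);
 *	... blocking operation ...
 *	vn_islocked_relock(vp, vpls);
 */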
/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
        int error;

        fp->f_ops = &badfileops;
        error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
        return (error);
}
/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
        int error;

        error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
        return (error);
}