/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated on with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
{
	struct file *fp = fpp ? *fpp : NULL;
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	int vpexcl;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 *
		 * If the file exists but is missing write permission,
		 * nlookup() returns EACCES.  This has to be handled
		 * specially when combined with O_EXCL.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
		if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
		    (fmode & O_EXCL)) {
			error = EEXIST;
		}

		/*
		 * If no error and nd->nl_dvp is NULL, the nlookup represents
		 * a mount-point or cross-mount situation.  e.g.
		 * open("/var/cache", O_CREAT), where /var/cache is a
		 * mount point or a null-mount point.
		 */
		if (error == 0 && nd->nl_dvp == NULL)
			error = EINVAL;
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 *
	 * (error is 0 on entry / retry)
	 */
again:
	/*
	 * Checks for (likely) filesystem-modifying cases and allows
	 * the filesystem to stall the front-end.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) ||
	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
		error = ncp_writechk(&nd->nl_nch);
		if (error)
			return error;
	}

	vpexcl = 1;
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode; /* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		/*
		 * In most other cases a shared lock on the vnode is
		 * sufficient.  However, the O_RDWR case needs an
		 * exclusive lock if the vnode is executable.  The
		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
		 * this.
		 *
		 * NOTE: If NCF_NOTX is not set, we do not know the
		 *	 state of the 'x' bits and have to get
		 *	 an exclusive lock for the EXCLLOCK_IFEXEC case.
		 */
		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
		     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
			vpexcl = 0;
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}

			/*
			 * Additional checks on vnode (does not substitute
			 * for ncp_writechk()).
			 */
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					u_int dummy_gen = 0;

					vput(vp);
					vp = NULL;
					if (vpexcl == 0) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      &dummy_gen,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 *	 disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).
	 *
	 * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
	 *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
	 *
	 * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
	 *	 (it will fdrop/fhold), and can also set the *fpp up however
	 *	 it wants, not necessarily using DTYPE_VNODE.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fpp);
	fp = fpp ? *fpp : NULL;

	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 *
	 * NOTE: vnode stored in fp may be different
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
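
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * contract documented above pairs nlookup_init()/nlookup_done() around
 * vn_open().  With fpp == NULL the opened vnode is returned via
 * nd.nl_open_vp instead of being installed in a file pointer.
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	nlookup_done(&nd);	(always, whether vn_open succeeded or not)
 */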

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return 0;
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
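
/*
 * Worked example for the heuristic above, assuming the customary
 * definitions (MAXBSIZE 65536, IO_SEQSHIFT 16, IO_SEQMAX 0x7F): a 128KB
 * read() starting exactly at f_nextoff adds howmany(131072, 65536) = 2
 * to f_seqcount, and the returned (f_seqcount << IO_SEQSHIFT) value is
 * OR'd into ioflag by vn_read()/vn_write() so the filesystem can scale
 * its read-ahead / write-behind clustering.
 */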

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
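
/*
 * NOTE on vn_poll_fpf_offset() above: on x86_64 an aligned 64-bit load
 * is atomic, so f_offset can be sampled directly without taking
 * FOFFSETLOCK; other platforms bounce through the get/set pair to read
 * the offset consistently.
 */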

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
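
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * reading a block from an already-locked vnode into a kernel buffer.
 * With aresid == NULL a short transfer is converted to EIO, as coded
 * above; passing &resid instead lets the caller accept partial I/O.
 *
 *	char buf[512];
 *	int resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf),
 *			(off_t)0, UIO_SYSSPACE, IO_NODELOCKED,
 *			cred, &resid);
 */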

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	if (vp->v_mount)
		VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	/*
	 * vp already has a ref and is validated, can call unlocked.
	 */
	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = devid_from_dev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.7 master).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect the function to fail, so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}

int
vn_relock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	return error;
}

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, it must check the
 * various possible return values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
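
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * unlock/relock pair lets code drop a possibly-held exclusive vnode lock
 * around a blocking operation and transparently restore it afterwards.
 *
 *	int vpls;
 *
 *	vpls = vn_islocked_unlock(vp);
 *	(blocking operation that must not hold the vnode lock)
 *	vn_islocked_relock(vp, vpls);
 */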

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}