[dragonfly.git] / sys / kern / vfs_vnops.c

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

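/*
 * NOTE: vfs_fastdev is a tunable defined outside this file.  When it is
 *	 clear, device files keep the regular vnode_fileops path and go
 *	 through the full specfs lock/unlock sequence described above.
 */
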
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

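/*
 * NOTE: On success vn_opendisk() returns the vnode referenced but
 *	 unlocked (it is unlocked unconditionally after the VOP_OPEN);
 *	 on failure *vpp is set to NULL.
 */
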
/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

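/*
 * NOTE: vn_close() always consumes the caller's reference via vrele(),
 *	 even when vn_lock() fails and the VOP_CLOSE() is skipped, so the
 *	 caller must not release the vnode again.
 */
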
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

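/*
 * NOTE: The non-zero return value above is OR'd into ioflag by the
 *	 callers, encoding the sequential hint in the high bits (via
 *	 IO_SEQSHIFT) where the filesystem read/write paths can use it
 *	 to scale read-ahead and clustering decisions.
 */
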
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

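/*
 * NOTE: Callers already holding the vnode lock pass IO_NODELOCKED in
 *	 ioflg to suppress the lock/unlock above.  If aresid is NULL a
 *	 short transfer is converted into EIO, so callers that expect
 *	 partial I/O should supply aresid.
 */
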
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to be a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

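/*
 * For example, if offset % MAXBSIZE == 1000 the first chunk is
 * MAXBSIZE - 1000 bytes, after which every remaining chunk starts on
 * a MAXBSIZE boundary; only the first and last chunks can be partial.
 */
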
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		error = 0;
		release_dev(dev);	/* drop the ref taken above */
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

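/*
 * NOTE: Unlike vn_read() no vnode lock is held around the device I/O;
 *	 the device may block indefinitely.  Only the f_offset/f_nextoff
 *	 bookkeeping touches the file pointer around dev_dread().
 */
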
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
#if 0
	/* VOP_WRITE should handle this now */
	if (vp->v_type == VREG || vp->v_type == VDATABASE)
		bwillwrite();
#endif
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Adjust the local `mode' so the change is not clobbered
		 * by the st_mode assignment below.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		cdev_t dev;

		if ((dev = vp->v_rdev) == NULL) {
			if (vp->v_type == VCHR)
				dev = get_dev(vp->v_umajor, vp->v_uminor);
		}
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}