/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.50 2007/05/09 00:53:34 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
                struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);

struct fileops vnode_fileops = {
        .fo_read = vn_read,
        .fo_write = vn_write,
        .fo_ioctl = vn_ioctl,
        .fo_poll = vn_poll,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
        .fo_read = svn_read,
        .fo_write = svn_write,
        .fo_ioctl = vn_ioctl,
        .fo_poll = vn_poll,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
        if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
                fp->f_ops = &specvnode_fileops;
        }
}

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
        struct vnode *vp;
        struct ucred *cred = nd->nl_cred;
        struct vattr vat;
        struct vattr *vap = &vat;
        int mode, error;

        /*
         * Lookup the path and create or obtain the vnode.  After a
         * successful lookup a locked nd->nl_nch will be returned.
         *
         * The result of this section should be a locked vnode.
         *
         * XXX with only a little work we should be able to avoid locking
         * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
         */
        if (fmode & O_CREAT) {
                /*
                 * CONDITIONAL CREATE FILE CASE
                 *
                 * Setting NLC_CREATE causes a negative hit to store
                 * the negative hit ncp and not return an error.  Then
                 * nc_error or nc_vp may be checked to see if the ncp
                 * represents a negative hit.  NLC_CREATE also requires
                 * write permission on the governing directory or EPERM
                 * is returned.
                 */
                if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
                        nd->nl_flags |= NLC_FOLLOW;
                nd->nl_flags |= NLC_CREATE;
                bwillwrite();
                error = nlookup(nd);
        } else {
                /*
                 * NORMAL OPEN FILE CASE
                 */
                error = nlookup(nd);
        }

        if (error)
                return (error);

        /*
         * split case to allow us to re-resolve and retry the ncp in case
         * we get ESTALE.
         */
again:
        if (fmode & O_CREAT) {
                if (nd->nl_nch.ncp->nc_vp == NULL) {
                        if ((error = ncp_writechk(&nd->nl_nch)) != 0)
                                return (error);
                        VATTR_NULL(vap);
                        vap->va_type = VREG;
                        vap->va_mode = cmode;
                        if (fmode & O_EXCL)
                                vap->va_vaflags |= VA_EXCLUSIVE;
                        error = VOP_NCREATE(&nd->nl_nch, &vp, nd->nl_cred, vap);
                        if (error)
                                return (error);
                        fmode &= ~O_TRUNC;
                        /* locked vnode is returned */
                } else {
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                        } else {
                                error = cache_vget(&nd->nl_nch, cred,
                                                    LK_EXCLUSIVE, &vp);
                        }
                        if (error)
                                return (error);
                        fmode &= ~O_CREAT;
                }
        } else {
                error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
                if (error)
                        return (error);
        }

        /*
         * We have a locked vnode and ncp now.  Note that the ncp will
         * be cleaned up by the caller if nd->nl_nch is left intact.
         */
        if (vp->v_type == VLNK) {
                error = EMLINK;
                goto bad;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if ((fmode & O_CREAT) == 0) {
                mode = 0;
                if (fmode & (FWRITE | O_TRUNC)) {
                        if (vp->v_type == VDIR) {
                                error = EISDIR;
                                goto bad;
                        }
                        error = vn_writechk(vp, &nd->nl_nch);
                        if (error) {
                                /*
                                 * Special stale handling, re-resolve the
                                 * vnode.
                                 */
                                if (error == ESTALE) {
                                        vput(vp);
                                        vp = NULL;
                                        cache_setunresolved(&nd->nl_nch);
                                        error = cache_resolve(&nd->nl_nch, cred);
                                        if (error == 0)
                                                goto again;
                                }
                                goto bad;
                        }
                        mode |= VWRITE;
                }
                if (fmode & FREAD)
                        mode |= VREAD;
                if (mode) {
                        error = VOP_ACCESS(vp, mode, cred);
                        if (error) {
                                /*
                                 * Special stale handling, re-resolve the
                                 * vnode.
                                 */
                                if (error == ESTALE) {
                                        vput(vp);
                                        vp = NULL;
                                        cache_setunresolved(&nd->nl_nch);
                                        error = cache_resolve(&nd->nl_nch, cred);
                                        if (error == 0)
                                                goto again;
                                }
                                goto bad;
                        }
                }
        }
        if (fmode & O_TRUNC) {
                vn_unlock(vp);                          /* XXX */
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* XXX */
                VATTR_NULL(vap);
                vap->va_size = 0;
                error = VOP_SETATTR(vp, vap, cred);
                if (error)
                        goto bad;
        }

        /*
         * Setup the fp so VOP_OPEN can override it.  No descriptor has been
         * associated with the fp yet so we own it clean.
         *
         * f_nchandle inherits nl_nch.  This used to be necessary only for
         * directories but now we do it unconditionally so f*() ops
         * such as fchmod() can access the actual namespace that was
         * used to open the file.
         */
        if (fp) {
                fp->f_nchandle = nd->nl_nch;
                cache_zero(&nd->nl_nch);
                cache_unlock(&fp->f_nchandle);
        }

        /*
         * Get rid of nl_nch.  vn_open does not return it (it returns the
         * vnode or the file pointer).  Note: we can't leave nl_nch locked
         * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
         * on /dev/ttyd0
         */
        if (nd->nl_nch.ncp)
                cache_put(&nd->nl_nch);

        error = VOP_OPEN(vp, fmode, cred, fp);
        if (error) {
                /*
                 * setting f_ops to &badfileops will prevent the descriptor
                 * code from trying to close and release the vnode, since
                 * the open failed we do not want to call close.
                 */
                if (fp) {
                        fp->f_data = NULL;
                        fp->f_ops = &badfileops;
                }
                goto bad;
        }

#if 0
        /*
         * Assert that VREG files have been setup for vmio.
         */
        KASSERT(vp->v_type != VREG || vp->v_object != NULL,
                ("vn_open: regular file was not VMIO enabled!"));
#endif

        /*
         * Return the vnode.  XXX needs some cleaning up.  The vnode is
         * only returned in the fp == NULL case.
         */
        if (fp == NULL) {
                nd->nl_open_vp = vp;
                nd->nl_vp_fmode = fmode;
                if ((nd->nl_flags & NLC_LOCKVP) == 0)
                        vn_unlock(vp);
        } else {
                vput(vp);
        }
        return (0);
bad:
        if (vp)
                vput(vp);
        return (error);
}
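
#if 0
/*
 * Illustrative sketch (never compiled): a hypothetical fp == NULL consumer
 * of vn_open() following the contract documented above.  The path, flags,
 * and function name here are made up; note the caller owns nd setup and
 * teardown, and must take nl_open_vp out of nd before nlookup_done().
 */
static int
example_open_write(void)
{
        struct nlookupdata nd;
        struct vnode *vp;
        int error;

        error = nlookup_init(&nd, "/var/log/example", UIO_SYSSPACE,
                             NLC_LOCKVP);
        if (error == 0)
                error = vn_open(&nd, NULL, FWRITE | O_APPEND, 0600);
        vp = nd.nl_open_vp;
        nd.nl_open_vp = NULL;
        nlookup_done(&nd);
        if (error == 0) {
                /* vp is referenced and locked (NLC_LOCKVP was set) */
                vn_unlock(vp);
                error = vn_close(vp, FWRITE);   /* drops the reference */
        }
        return (error);
}
#endif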

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
        /*
         * If there's shared text associated with
         * the vnode, try to free it up once.  If
         * we fail, we can't allow writing.
         */
        if (vp->v_flag & VTEXT)
                return (ETXTBSY);

        /*
         * If the vnode represents a regular file, check the mount
         * point via the nch.  This may be a different mount point
         * than the one embedded in the vnode (e.g. nullfs).
         *
         * We can still write to non-regular files (e.g. devices)
         * via read-only mounts.
         */
        if (nch && nch->ncp && vp->v_type == VREG)
                return (ncp_writechk(nch));
        return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
        if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
                return (EROFS);
        return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
        int error;

        if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
                error = VOP_CLOSE(vp, flags);
                vn_unlock(vp);
        }
        vrele(vp);
        return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
        /*
         * Sequential heuristic - detect sequential operation
         */
        if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
            uio->uio_offset == fp->f_nextoff) {
                int tmpseq = fp->f_seqcount;
                /*
                 * XXX we assume that the filesystem block size is
                 * the default.  Not true, but still gives us a pretty
                 * good indicator of how sequential the read operations
                 * are.
                 */
                tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
                if (tmpseq > IO_SEQMAX)
                        tmpseq = IO_SEQMAX;
                fp->f_seqcount = tmpseq;
                return (fp->f_seqcount << IO_SEQSHIFT);
        }

        /*
         * Not sequential, quick draw-down of seqcount
         */
        if (fp->f_seqcount > 1)
                fp->f_seqcount = 1;
        else
                fp->f_seqcount = 0;
        return (0);
}
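
#if 0
/*
 * Worked example of the heuristic (illustrative; assumes the common
 * BKVASIZE of 16384): three back-to-back 64KB reads starting at offset 0.
 *
 *	read #1: resid 65536 -> f_seqcount += (65536+16383)/16384 = 4
 *	read #2: uio_offset == f_nextoff   -> f_seqcount = 8
 *	read #3: f_seqcount = 12, hint returned = 12 << IO_SEQSHIFT
 *
 * f_seqcount saturates at IO_SEQMAX.  A read at an unrelated offset then
 * decays f_seqcount to 1, and a second non-sequential read drops it to 0,
 * at which point the returned read-ahead hint is 0.
 */
#endif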

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
        off_t offset, enum uio_seg segflg, int ioflg,
        struct ucred *cred, int *aresid)
{
        struct uio auio;
        struct iovec aiov;
        struct ccms_lock ccms_lock;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_segflg = segflg;
        auio.uio_rw = rw;
        auio.uio_td = curthread;
        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0)
                vn_unlock(vp);
        return (error);
}
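
#if 0
/*
 * Illustrative sketch (never compiled): reading the first 512 bytes of a
 * file into a kernel buffer with vn_rdwr().  The buffer and function name
 * are hypothetical.  With ioflg 0 the vnode is locked and unlocked for us;
 * aresid reports how many bytes were NOT transferred.
 */
static int
example_read_header(struct vnode *vp, struct ucred *cred)
{
        char buf[512];
        int resid;
        int error;

        error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
                        UIO_SYSSPACE, 0, cred, &resid);
        if (error == 0 && resid != 0)
                error = EINVAL;         /* short read: file < 512 bytes */
        return (error);
}
#endif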

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 * A worked example of the chunking arithmetic follows the function.
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
                 off_t offset, enum uio_seg segflg, int ioflg,
                 struct ucred *cred, int *aresid)
{
        int error = 0;

        do {
                int chunk;

                /*
                 * Force `offset' to a multiple of MAXBSIZE except possibly
                 * for the first chunk, so that filesystems only need to
                 * write full blocks except possibly for the first and last
                 * chunks.
                 */
                chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

                if (chunk > len)
                        chunk = len;
                if (rw != UIO_READ && vp->v_type == VREG)
                        bwillwrite();
                error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
                                ioflg, cred, aresid);
                len -= chunk;   /* aresid calc already includes length */
                if (error)
                        break;
                offset += chunk;
                base += chunk;
                uio_yield();
        } while (len);
        if (aresid)
                *aresid += len;
        return (error);
}
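
#if 0
/*
 * Worked example of the chunking arithmetic (illustrative; assumes the
 * usual MAXBSIZE of 65536): a 200000 byte transfer starting at offset
 * 1000 is issued as
 *
 *	chunk 1: 65536 - (1000 % 65536) = 64536 bytes  (offset 1000)
 *	chunk 2: 65536 bytes                           (offset 65536)
 *	chunk 3: 65536 bytes                           (offset 131072)
 *	chunk 4: 4392 bytes                            (offset 196608)
 *
 * so every chunk after the first begins on a MAXBSIZE boundary and all
 * but the first and last cover full blocks.
 */
#endif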

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct ccms_lock ccms_lock;
        struct vnode *vp;
        int error, ioflag;

        get_mplock();
        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not td %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;

        ioflag = 0;
        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        vn_lock(vp, LK_SHARED | LK_RETRY);
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;
        ioflag |= sequential_heuristic(uio, fp);

        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
        error = VOP_READ(vp, uio, ioflag, cred);
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        rel_mplock();
        return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        int ioflag;
        int error;
        cdev_t dev;

        get_mplock();
        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not td %p", uio->uio_td, curthread));

        vp = (struct vnode *)fp->f_data;
        if (vp == NULL || vp->v_type == VBAD) {
                error = EBADF;
                goto done;
        }

        /*
         * Check for a degenerate zero-length request before taking a
         * device reference, so the early return cannot leak one.
         */
        if (uio->uio_resid == 0) {
                error = 0;
                goto done;
        }

        if ((dev = vp->v_rdev) == NULL) {
                error = EBADF;
                goto done;
        }
        reference_dev(dev);

        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;

        ioflag = 0;
        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        ioflag |= sequential_heuristic(uio, fp);

        error = dev_dread(dev, uio, ioflag);

        release_dev(dev);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
done:
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct ccms_lock ccms_lock;
        struct vnode *vp;
        int error, ioflag;

        get_mplock();
        KASSERT(uio->uio_td == curthread,
                ("uio_procp %p is not p %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;
        if (vp->v_type == VREG)
                bwillwrite();
        vp = (struct vnode *)fp->f_data;        /* XXX needed? */

        ioflag = IO_UNIT;
        if (vp->v_type == VREG &&
            ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
                ioflag |= IO_APPEND;
        }

        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if (flags & O_FASYNCWRITE) {
                /* ioflag &= ~IO_SYNC; */
        } else if (flags & O_FSYNCWRITE) {
                ioflag |= IO_SYNC;
        } else if (fp->f_flag & O_FSYNC) {
                ioflag |= IO_SYNC;
        }

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
                ioflag |= IO_SYNC;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;
        ioflag |= sequential_heuristic(uio, fp);
        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
        error = VOP_WRITE(vp, uio, ioflag, cred);
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        rel_mplock();
        return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        int ioflag;
        int error;
        cdev_t dev;

        get_mplock();
        KASSERT(uio->uio_td == curthread,
                ("uio_procp %p is not p %p", uio->uio_td, curthread));

        vp = (struct vnode *)fp->f_data;
        if (vp == NULL || vp->v_type == VBAD) {
                error = EBADF;
                goto done;
        }
        if (vp->v_type == VREG)
                bwillwrite();
        vp = (struct vnode *)fp->f_data;        /* XXX needed? */

        if ((dev = vp->v_rdev) == NULL) {
                error = EBADF;
                goto done;
        }
        reference_dev(dev);

        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;

        ioflag = IO_UNIT;
        if (vp->v_type == VREG &&
            ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
                ioflag |= IO_APPEND;
        }

        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if (flags & O_FASYNCWRITE) {
                /* ioflag &= ~IO_SYNC; */
        } else if (flags & O_FSYNCWRITE) {
                ioflag |= IO_SYNC;
        } else if (fp->f_flag & O_FSYNC) {
                ioflag |= IO_SYNC;
        }

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
                ioflag |= IO_SYNC;
        ioflag |= sequential_heuristic(uio, fp);

        error = dev_dwrite(dev, uio, ioflag);

        release_dev(dev);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
done:
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
        struct vnode *vp;
        int error;

        get_mplock();
        vp = (struct vnode *)fp->f_data;
        error = vn_stat(vp, sb, cred);
        rel_mplock();
        return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
        struct vattr vattr;
        struct vattr *vap;
        int error;
        u_short mode;
        cdev_t dev;

        vap = &vattr;
        error = VOP_GETATTR(vp, vap);
        if (error)
                return (error);

        /*
         * Zero the spare stat fields
         */
        sb->st_lspare = 0;
        sb->st_qspare = 0;

        /*
         * Copy from vattr table
         */
        if (vap->va_fsid != VNOVAL)
                sb->st_dev = vap->va_fsid;
        else
                sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
        sb->st_ino = vap->va_fileid;
        mode = vap->va_mode;
        switch (vap->va_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                /* This is a cosmetic change, symlinks do not have a mode. */
                if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
                        mode &= ~ACCESSPERMS;   /* 0000 */
                else
                        mode |= ACCESSPERMS;    /* 0777 */
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = vap->va_nlink;
        sb->st_uid = vap->va_uid;
        sb->st_gid = vap->va_gid;
        sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
        sb->st_size = vap->va_size;
        sb->st_atimespec = vap->va_atime;
        sb->st_mtimespec = vap->va_mtime;
        sb->st_ctimespec = vap->va_ctime;

        /*
         * A VCHR and VBLK device may track the last access and last modified
         * time independently of the filesystem.  This is particularly true
         * because device read and write calls may bypass the filesystem.
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK) {
                if ((dev = vp->v_rdev) != NULL) {
                        if (dev->si_lastread) {
                                sb->st_atimespec.tv_sec = dev->si_lastread;
                                sb->st_atimespec.tv_nsec = 0;
                        }
                        if (dev->si_lastwrite) {
                                sb->st_mtimespec.tv_sec = dev->si_lastwrite;
                                sb->st_mtimespec.tv_nsec = 0;
                        }
                }
        }

        /*
         * According to www.opengroup.org, the meaning of st_blksize is
         *   "a filesystem-specific preferred I/O block size for this
         *    object.  In some filesystem types, this may vary from file
         *    to file"
         * Default to PAGE_SIZE after much discussion.
         */

        if (vap->va_type == VREG) {
                sb->st_blksize = vap->va_blocksize;
        } else if (vn_isdisk(vp, NULL)) {
                /*
                 * XXX this is broken.  If the device is not yet open (aka
                 * stat() call, aka v_rdev == NULL), how are we supposed
                 * to get a valid block size out of it?
                 */
                cdev_t dev;

                if ((dev = vp->v_rdev) == NULL) {
                        if (vp->v_type == VCHR)
                                dev = get_dev(vp->v_umajor, vp->v_uminor);
                }
                sb->st_blksize = dev->si_bsize_best;
                if (sb->st_blksize < dev->si_bsize_phys)
                        sb->st_blksize = dev->si_bsize_phys;
                if (sb->st_blksize < BLKDEV_IOSIZE)
                        sb->st_blksize = BLKDEV_IOSIZE;
        } else {
                sb->st_blksize = PAGE_SIZE;
        }

        sb->st_flags = vap->va_flags;
        if (suser_cred(cred, 0))
                sb->st_gen = 0;
        else
                sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
        /* Optimize this case */
        sb->st_blocks = vap->va_bytes >> 9;
#else
        sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
        sb->st_fsmid = vap->va_fsmid;
        return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        struct vnode *ovp;
        struct vattr vattr;
        int error;

        get_mplock();

        switch (vp->v_type) {
        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        if ((error = VOP_GETATTR(vp, &vattr)) != 0)
                                break;
                        *(int *)data = vattr.va_size - fp->f_offset;
                        error = 0;
                        break;
                }
                if (com == FIOASYNC) {                          /* XXX */
                        error = 0;                              /* XXX */
                        break;
                }
                /* fall into ... */
        default:
#if 0
                return (ENOTTY);
#endif
        case VFIFO:
        case VCHR:
        case VBLK:
                if (com == FIODTYPE) {
                        if (vp->v_type != VCHR && vp->v_type != VBLK) {
                                error = ENOTTY;
                                break;
                        }
                        *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
                        error = 0;
                        break;
                }
                error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
                if (error == 0 && com == TIOCSCTTY) {
                        struct proc *p = curthread->td_proc;
                        struct session *sess;

                        if (p == NULL) {
                                error = ENOTTY;
                                break;
                        }

                        sess = p->p_session;
                        /* Do nothing if reassigning same control tty */
                        if (sess->s_ttyvp == vp) {
                                error = 0;
                                break;
                        }

                        /* Get rid of reference to old control tty */
                        ovp = sess->s_ttyvp;
                        vref(vp);
                        sess->s_ttyvp = vp;
                        if (ovp)
                                vrele(ovp);
                }
                break;
        }
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
        int error;

        get_mplock();
        error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
        rel_mplock();
        return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
        int error;

        do {
#ifdef DEBUG_LOCKS
                vp->filename = filename;
                vp->line = line;
                error = debuglockmgr(&vp->v_lock, flags,
                                     "vn_lock", filename, line);
#else
                error = lockmgr(&vp->v_lock, flags);
#endif
                if (error == 0)
                        break;
        } while (flags & LK_RETRY);

        /*
         * Because we (had better!) have a ref on the vnode, once it
         * goes to VRECLAIMED state it will not be recycled until all
         * refs go away.  So we can just check the flag.
         */
        if (error == 0 && (vp->v_flag & VRECLAIMED)) {
                lockmgr(&vp->v_lock, LK_RELEASE);
                error = ENOENT;
        }
        return (error);
}
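
#if 0
/*
 * Illustrative sketch (never compiled) of the usual lock/unlock pairing.
 * Even with LK_RETRY, which retries the lock attempt until it succeeds,
 * vn_lock() can still fail with ENOENT if the vnode was reclaimed, so
 * the return value must always be checked.  The function name here is
 * hypothetical.
 */
static int
example_locked_getattr(struct vnode *vp, struct vattr *vap)
{
        int error;

        error = vn_lock(vp, LK_SHARED | LK_RETRY);
        if (error == 0) {
                error = VOP_GETATTR(vp, vap);
                vn_unlock(vp);
        }
        return (error);
}
#endif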

void
vn_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
        return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
        int error;

        get_mplock();
        fp->f_ops = &badfileops;
        error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
        rel_mplock();
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
        int error;

        get_mplock();
        error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
        rel_mplock();
        return (error);
}