/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

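/*
 * Illustrative sketch (not part of the original file): the canonical
 * vn_open() calling sequence pairs it with nlookup_init() and
 * nlookup_done(), the latter of which must run whether or not
 * vn_open() succeeds:
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, fp, FREAD, 0);
 *	nlookup_done(&nd);
 */
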
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

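/*
 * Illustrative arithmetic (assuming the usual 16KB BKVASIZE): a 64KB
 * sequential read adds (65536 + 16383) / 16384 = 4 to f_seqcount,
 * clamped to IO_SEQMAX.  The return value, f_seqcount << IO_SEQSHIFT,
 * rides in the high ioflag bits where filesystems look when scaling
 * read-ahead.
 */
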
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

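/*
 * Illustrative sketch (not part of the original file): read the first
 * 512 bytes of a vnode into a kernel buffer, letting vn_rdwr() handle
 * the vnode lock:
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *			0, cred, &resid);
 *
 * On return resid holds the number of bytes not transferred.
 */
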
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

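/*
 * Illustrative arithmetic (assuming a 64KB MAXBSIZE): starting at
 * offset 100000, the first chunk is 65536 - (100000 % 65536) = 31072
 * bytes, which realigns the transfer to a MAXBSIZE boundary; every
 * subsequent chunk is then a full 65536 bytes until the final,
 * possibly short, one.
 */
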
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		error = 0;
		release_dev(dev);	/* drop the ref acquired above */
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
#if 0
	/* VOP_WRITE should handle this now */
	if (vp->v_type == VREG || vp->v_type == VDATABASE)
		bwillwrite();
#endif
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;
		if (dev == NULL && vp->v_type == VCHR) {
			dev = get_dev(vp->v_umajor, vp->v_uminor);
		}
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

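/*
 * Illustrative arithmetic: st_blocks is expressed in S_BLKSIZE (512
 * byte) units, so a file occupying va_bytes = 1048576 bytes of media
 * reports st_blocks = 1048576 / 512 = 2048.
 */
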
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {	/* XXX */
			error = 0;	/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

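/*
 * Illustrative sketch (not part of the original file): the usual
 * pairing.  With LK_RETRY the lock attempt is repeated until it
 * succeeds, but ENOENT is still returned if the vnode was reclaimed
 * out from under us:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		...operate on the locked vnode...
 *		vn_unlock(vp);
 *	}
 */
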
void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}