/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.50 2007/05/09 00:53:34 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
        .fo_read = vn_read,
        .fo_write = vn_write,
        .fo_ioctl = vn_ioctl,
        .fo_poll = vn_poll,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
        .fo_read = svn_read,
        .fo_write = svn_write,
        .fo_ioctl = vn_ioctl,
        .fo_poll = vn_poll,
        .fo_kqfilter = vn_kqfilter,
        .fo_stat = vn_statfile,
        .fo_close = vn_closefile,
        .fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
        if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
                fp->f_ops = &specvnode_fileops;
        }
}
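
/*
 * With specvnode_fileops installed, a read(2) on the descriptor is
 * dispatched through svn_read() straight to dev_dread(), instead of
 * going through vn_read() and VOP_READ() into specfs.
 */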

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
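/*
 * Typical caller pattern (sketch only; the exact flags and error handling
 * depend on the caller).  With fp == NULL the opened vnode is returned
 * via nd->nl_open_vp as described above:
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	nlookup_done(&nd);
 */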
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
        struct vnode *vp;
        struct ucred *cred = nd->nl_cred;
        struct vattr vat;
        struct vattr *vap = &vat;
        int mode, error;

        /*
         * Lookup the path and create or obtain the vnode.  After a
         * successful lookup a locked nd->nl_nch will be returned.
         *
         * The result of this section should be a locked vnode.
         *
         * XXX with only a little work we should be able to avoid locking
         * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
         */
        if (fmode & O_CREAT) {
                /*
                 * CONDITIONAL CREATE FILE CASE
                 *
                 * Setting NLC_CREATE causes a negative hit to store
                 * the negative hit ncp and not return an error.  Then
                 * nc_error or nc_vp may be checked to see if the ncp
                 * represents a negative hit.  NLC_CREATE also requires
                 * write permission on the governing directory or EPERM
                 * is returned.
                 */
                if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
                        nd->nl_flags |= NLC_FOLLOW;
                nd->nl_flags |= NLC_CREATE;
                /* ... */
        } else {
                /*
                 * NORMAL OPEN FILE CASE
                 */
                /* ... */
        }

        /*
         * split case to allow us to re-resolve and retry the ncp in case
         * we get ESTALE.
         */
        if (fmode & O_CREAT) {
                if (nd->nl_nch.ncp->nc_vp == NULL) {
                        if ((error = ncp_writechk(&nd->nl_nch)) != 0)
                                return (error);
                        /* ... */
                        vap->va_mode = cmode;
                        if (fmode & O_EXCL)
                                vap->va_vaflags |= VA_EXCLUSIVE;
                        error = VOP_NCREATE(&nd->nl_nch, &vp, nd->nl_cred, vap);
                        /* ... */
                        /* locked vnode is returned */
                } else {
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                        } else {
                                error = cache_vget(&nd->nl_nch, cred,
                                                   LK_EXCLUSIVE, &vp);
                        }
                        /* ... */
                }
        } else {
                error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
                /* ... */
        }

        /*
         * We have a locked vnode and ncp now.  Note that the ncp will
         * be cleaned up by the caller if nd->nl_nch is left intact.
         */
        if (vp->v_type == VLNK) {
                /* ... */
        }
        if (vp->v_type == VSOCK) {
                /* ... */
        }
        if ((fmode & O_CREAT) == 0) {
                mode = 0;
                if (fmode & (FWRITE | O_TRUNC)) {
                        if (vp->v_type == VDIR) {
                                /* ... */
                        }
                        error = vn_writechk(vp, &nd->nl_nch);
                        if (error) {
                                /*
                                 * Special stale handling, re-resolve the
                                 * vnode.
                                 */
                                if (error == ESTALE) {
                                        /* ... */
                                        cache_setunresolved(&nd->nl_nch);
                                        error = cache_resolve(&nd->nl_nch,
                                                              cred);
                                        /* ... */
                                }
                                /* ... */
                        }
                        /* ... */
                }
                /* ... */
                error = VOP_ACCESS(vp, mode, cred);
                if (error) {
                        /*
                         * Special stale handling, re-resolve the
                         * vnode.
                         */
                        if (error == ESTALE) {
                                /* ... */
                                cache_setunresolved(&nd->nl_nch);
                                error = cache_resolve(&nd->nl_nch, cred);
                                /* ... */
                        }
                        /* ... */
                }
        }

        if (fmode & O_TRUNC) {
                vn_unlock(vp);                          /* XXX */
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* XXX */
                VATTR_NULL(vap);
                vap->va_size = 0;
                error = VOP_SETATTR(vp, vap, cred);
                /* ... */
        }

        /*
         * Setup the fp so VOP_OPEN can override it.  No descriptor has been
         * associated with the fp yet so we own it clean.
         *
         * f_nchandle inherits nl_nch.  This used to be necessary only for
         * directories but now we do it unconditionally so f*() ops
         * such as fchmod() can access the actual namespace that was
         * used to open the file.
         */
        if (fp) {
                fp->f_nchandle = nd->nl_nch;
                cache_zero(&nd->nl_nch);
                cache_unlock(&fp->f_nchandle);
        }

        /*
         * Get rid of nl_nch.  vn_open does not return it (it returns the
         * vnode or the file pointer).  Note: we can't leave nl_nch locked
         * through the VOP_OPEN anyway since the VOP_OPEN may block.
         */
        cache_put(&nd->nl_nch);

        error = VOP_OPEN(vp, fmode, cred, fp);
        if (error) {
                /*
                 * Setting f_ops to &badfileops will prevent the descriptor
                 * code from trying to close and release the vnode; since
                 * the open failed we do not want to call close.
                 */
                /* ... */
                fp->f_ops = &badfileops;
                /* ... */
        }

        /*
         * Assert that VREG files have been setup for vmio.
         */
        KASSERT(vp->v_type != VREG || vp->v_object != NULL,
                ("vn_open: regular file was not VMIO enabled!"));

        /*
         * Return the vnode.  XXX needs some cleaning up.  The vnode is
         * only returned in the fp == NULL case.
         */
        if (fp == NULL) {
                nd->nl_open_vp = vp;
                nd->nl_vp_fmode = fmode;
                if ((nd->nl_flags & NLC_LOCKVP) == 0)
                        vn_unlock(vp);
        }
        /* ... */
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
        /*
         * If there's shared text associated with
         * the vnode, try to free it up once.  If
         * we fail, we can't allow writing.
         */
        if (vp->v_flag & VTEXT)
                return (ETXTBSY);

        /*
         * If the vnode represents a regular file, check the mount
         * point via the nch.  This may be a different mount point
         * than the one embedded in the vnode (e.g. nullfs).
         *
         * We can still write to non-regular files (e.g. devices)
         * via read-only mounts.
         */
        if (nch && nch->ncp && vp->v_type == VREG)
                return (ncp_writechk(nch));
        return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
        if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
                return (EROFS);
        return (0);
}
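
/*
 * Example of why ncp_writechk() looks at nch->mount rather than
 * vp->v_mount: with a read-only nullfs mount layered over a read-write
 * filesystem, the namecache handle points at the nullfs mount (which
 * has MNT_RDONLY set) while the underlying vnode's v_mount does not,
 * so only the namecache-based check catches the violation.
 */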

int
vn_close(struct vnode *vp, int flags)
{
        int error;

        if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
                error = VOP_CLOSE(vp, flags);
                /* ... */
        }
        /* ... */
        return (error);
}

static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
        /*
         * Sequential heuristic - detect sequential operation
         */
        if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
            uio->uio_offset == fp->f_nextoff) {
                int tmpseq = fp->f_seqcount;
                /*
                 * XXX we assume that the filesystem block size is
                 * the default.  Not true, but still gives us a pretty
                 * good indicator of how sequential the read operations
                 * are.
                 */
                tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
                if (tmpseq > IO_SEQMAX)
                        tmpseq = IO_SEQMAX;
                fp->f_seqcount = tmpseq;
                return(fp->f_seqcount << IO_SEQSHIFT);
        }

        /*
         * Not sequential, quick draw-down of seqcount
         */
        if (fp->f_seqcount > 1)
                fp->f_seqcount = 1;
        else
                fp->f_seqcount = 0;
        return (0);
}
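
/*
 * Worked example of the heuristic above (values assume the common
 * BKVASIZE of 16KB): a sequential 64KB read bumps f_seqcount by
 * (65536 + 16383) / 16384 = 4, capped at IO_SEQMAX, and the value
 * returned to the caller is f_seqcount << IO_SEQSHIFT, which is OR'd
 * into ioflag as a sequential-access hint for VOP_READ/VOP_WRITE.
 */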

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
        off_t offset, enum uio_seg segflg, int ioflg,
        struct ucred *cred, int *aresid)
{
        struct uio auio;
        struct iovec aiov;
        struct ccms_lock ccms_lock;
        int error;

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_segflg = segflg;
        auio.uio_rw = rw;
        auio.uio_td = curthread;
        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        if ((ioflg & IO_NODELOCKED) == 0)
                vn_unlock(vp);
        return (error);
}
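
/*
 * Usage sketch (illustrative only, not taken from a caller in this file):
 * read the first 512 bytes of an already-referenced vnode into a kernel
 * buffer, letting vn_rdwr() handle the locking.
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */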

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
        off_t offset, enum uio_seg segflg, int ioflg,
        struct ucred *cred, int *aresid)
{
        int error = 0;

        do {
                int chunk;

                /*
                 * Force `offset' to a multiple of MAXBSIZE except possibly
                 * for the first chunk, so that filesystems only need to
                 * write full blocks except possibly for the first and last
                 * chunk.
                 */
                chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

                if (chunk > len)
                        chunk = len;
                if (rw != UIO_READ && vp->v_type == VREG)
                        bwillwrite();
                error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
                                ioflg, cred, aresid);
                len -= chunk;   /* aresid calc already includes length */
                /* ... */
        } while (len);
        /* ... */
        return (error);
}
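
/*
 * Chunking example (assuming the usual MAXBSIZE of 65536): a 200000 byte
 * transfer starting at offset 1000 is issued as 64536 bytes (which brings
 * the offset up to the next MAXBSIZE boundary), then two full 65536 byte
 * chunks, then a final 4392 byte chunk.
 */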

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct ccms_lock ccms_lock;
        struct vnode *vp;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not td %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;

        ioflag = 0;
        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        vn_lock(vp, LK_SHARED | LK_RETRY);
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;
        ioflag |= sequential_heuristic(uio, fp);

        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
        error = VOP_READ(vp, uio, ioflag, cred);
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        /* ... */
        return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        cdev_t dev;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_td %p is not td %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;
        if (vp == NULL || vp->v_type == VBAD) {
                /* ... */
        }
        if ((dev = vp->v_rdev) == NULL) {
                /* ... */
        }
        /* ... */
        if (uio->uio_resid == 0) {
                /* ... */
        }
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;

        ioflag = 0;
        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        ioflag |= sequential_heuristic(uio, fp);

        error = dev_dread(dev, uio, ioflag);

        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        /* ... */
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct ccms_lock ccms_lock;
        struct vnode *vp;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_procp %p is not p %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;
        if (vp->v_type == VREG)
                bwillwrite();
        vp = (struct vnode *)fp->f_data;        /* XXX needed? */

        ioflag = IO_UNIT;
        if (vp->v_type == VREG &&
            ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
                ioflag |= IO_APPEND;
        }

        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if (flags & O_FASYNCWRITE) {
                /* ioflag &= ~IO_SYNC; */
        } else if (flags & O_FSYNCWRITE) {
                ioflag |= IO_SYNC;
        } else if (fp->f_flag & O_FSYNC) {
                ioflag |= IO_SYNC;
        }

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
                ioflag |= IO_SYNC;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;
        ioflag |= sequential_heuristic(uio, fp);
        ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
        error = VOP_WRITE(vp, uio, ioflag, cred);
        ccms_lock_put(&vp->v_ccms, &ccms_lock);
        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        vn_unlock(vp);
        /* ... */
        return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct vnode *vp;
        cdev_t dev;
        int error, ioflag;

        KASSERT(uio->uio_td == curthread,
                ("uio_procp %p is not p %p", uio->uio_td, curthread));
        vp = (struct vnode *)fp->f_data;
        if (vp == NULL || vp->v_type == VBAD) {
                /* ... */
        }
        if (vp->v_type == VREG)
                bwillwrite();
        vp = (struct vnode *)fp->f_data;        /* XXX needed? */

        if ((dev = vp->v_rdev) == NULL) {
                /* ... */
        }

        if ((flags & O_FOFFSET) == 0)
                uio->uio_offset = fp->f_offset;

        ioflag = IO_UNIT;
        if (vp->v_type == VREG &&
            ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
                ioflag |= IO_APPEND;
        }

        if (flags & O_FBLOCKING) {
                /* ioflag &= ~IO_NDELAY; */
        } else if (flags & O_FNONBLOCKING) {
                ioflag |= IO_NDELAY;
        } else if (fp->f_flag & FNONBLOCK) {
                ioflag |= IO_NDELAY;
        }
        if (flags & O_FBUFFERED) {
                /* ioflag &= ~IO_DIRECT; */
        } else if (flags & O_FUNBUFFERED) {
                ioflag |= IO_DIRECT;
        } else if (fp->f_flag & O_DIRECT) {
                ioflag |= IO_DIRECT;
        }
        if (flags & O_FASYNCWRITE) {
                /* ioflag &= ~IO_SYNC; */
        } else if (flags & O_FSYNCWRITE) {
                ioflag |= IO_SYNC;
        } else if (fp->f_flag & O_FSYNC) {
                ioflag |= IO_SYNC;
        }

        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
                ioflag |= IO_SYNC;
        ioflag |= sequential_heuristic(uio, fp);

        error = dev_dwrite(dev, uio, ioflag);

        if ((flags & O_FOFFSET) == 0)
                fp->f_offset = uio->uio_offset;
        fp->f_nextoff = uio->uio_offset;
        /* ... */
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
        struct vnode *vp;
        int error;

        vp = (struct vnode *)fp->f_data;
        error = vn_stat(vp, sb, cred);
        /* ... */
        return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
        struct vattr vattr;
        struct vattr *vap = &vattr;
        cdev_t dev;
        int error;

        error = VOP_GETATTR(vp, vap);
        if (error)
                return (error);

        /*
         * Zero the spare stat fields
         */
        /* ... */

        /*
         * Copy from vattr table
         */
        if (vap->va_fsid != VNOVAL)
                sb->st_dev = vap->va_fsid;
        else
                sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
        sb->st_ino = vap->va_fileid;
        sb->st_mode = vap->va_mode;
        switch (vap->va_type) {
        /* ... */
        case VLNK:
                sb->st_mode |= S_IFLNK;
                /* This is a cosmetic change, symlinks do not have a mode. */
                if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
                        sb->st_mode &= ~ACCESSPERMS;    /* 0000 */
                else
                        sb->st_mode |= ACCESSPERMS;     /* 0777 */
                break;
        /* ... */
        }

        sb->st_nlink = vap->va_nlink;
        sb->st_uid = vap->va_uid;
        sb->st_gid = vap->va_gid;
        sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
        sb->st_size = vap->va_size;
        sb->st_atimespec = vap->va_atime;
        sb->st_mtimespec = vap->va_mtime;
        sb->st_ctimespec = vap->va_ctime;

        /*
         * A VCHR and VBLK device may track the last access and last modified
         * time independently of the filesystem.  This is particularly true
         * because device read and write calls may bypass the filesystem.
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK) {
                if ((dev = vp->v_rdev) != NULL) {
                        if (dev->si_lastread) {
                                sb->st_atimespec.tv_sec = dev->si_lastread;
                                sb->st_atimespec.tv_nsec = 0;
                        }
                        if (dev->si_lastwrite) {
                                sb->st_mtimespec.tv_sec = dev->si_lastwrite;
                                sb->st_mtimespec.tv_nsec = 0;
                        }
                }
        }

        /*
         * According to www.opengroup.org, the meaning of st_blksize is
         *   "a filesystem-specific preferred I/O block size for this
         *    object.  In some filesystem types, this may vary from file
         *    to file."
         *   Default to PAGE_SIZE after much discussion.
         */
        if (vap->va_type == VREG) {
                sb->st_blksize = vap->va_blocksize;
        } else if (vn_isdisk(vp, NULL)) {
                /*
                 * XXX this is broken.  If the device is not yet open (aka
                 * stat() call, aka v_rdev == NULL), how are we supposed
                 * to get a valid block size out of it?
                 */
                if ((dev = vp->v_rdev) == NULL) {
                        if (vp->v_type == VCHR)
                                dev = get_dev(vp->v_umajor, vp->v_uminor);
                }
                sb->st_blksize = dev->si_bsize_best;
                if (sb->st_blksize < dev->si_bsize_phys)
                        sb->st_blksize = dev->si_bsize_phys;
                if (sb->st_blksize < BLKDEV_IOSIZE)
                        sb->st_blksize = BLKDEV_IOSIZE;
        } else {
                sb->st_blksize = PAGE_SIZE;
        }

        sb->st_flags = vap->va_flags;
        if (suser_cred(cred, 0))
                sb->st_gen = 0;
        else
                sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
        /* Optimize this case */
        sb->st_blocks = vap->va_bytes >> 9;
#else
        sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
        sb->st_fsmid = vap->va_fsmid;
        return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
        struct vnode *vp = ((struct vnode *)fp->f_data);
        struct vattr vattr;
        int error;

        /* ... */
        switch (vp->v_type) {
        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        if ((error = VOP_GETATTR(vp, &vattr)) != 0)
                                break;
                        *(int *)data = vattr.va_size - fp->f_offset;
                        /* ... */
                }
                if (com == FIOASYNC) {  /* XXX */
                        /* ... */
                }
                /* ... */
        default:
                /* ... */
                if (com == FIODTYPE) {
                        if (vp->v_type != VCHR && vp->v_type != VBLK) {
                                /* ... */
                        }
                        *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
                        /* ... */
                }
                error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
                if (error == 0 && com == TIOCSCTTY) {
                        struct proc *p = curthread->td_proc;
                        struct session *sess;

                        /* ... */
                        sess = p->p_session;
                        /* Do nothing if reassigning same control tty */
                        if (sess->s_ttyvp == vp) {
                                /* ... */
                        }

                        /* Get rid of reference to old control tty */
                        /* ... */
                }
        }
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
        int error;

        /* ... */
        error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
        /* ... */
        return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
#ifndef DEBUG_LOCKS
int
vn_lock(struct vnode *vp, int flags)
#else
int
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
        int error;

        do {
#ifdef  DEBUG_LOCKS
                vp->filename = filename;
                error = debuglockmgr(&vp->v_lock, flags,
                                     "vn_lock", filename, line);
#else
                error = lockmgr(&vp->v_lock, flags);
#endif
                if (error == 0)
                        break;
        } while (flags & LK_RETRY);

        /*
         * Because we (had better!) have a ref on the vnode, once it
         * goes to VRECLAIMED state it will not be recycled until all
         * refs go away.  So we can just check the flag.
         */
        if (error == 0 && (vp->v_flag & VRECLAIMED)) {
                lockmgr(&vp->v_lock, LK_RELEASE);
                error = ENOENT;
        }
        return (error);
}
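
/*
 * Typical usage pattern (sketch): the caller holds a reference on the
 * vnode before locking, and drops the lock and the reference when done.
 *
 *	vref(vp);
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		...
 *		vn_unlock(vp);
 *	}
 *	vrele(vp);
 */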

void
vn_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
        return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
        int error;

        /* ... */
        fp->f_ops = &badfileops;
        error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
        /* ... */
        return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
        int error;

        /* ... */
        error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
        /* ... */
        return (error);
}