/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>
static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
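/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller such as kern_open() brackets vn_open() with nlookup_init() and
 * nlookup_done(), e.g.:
 *
 *	struct nlookupdata nd;
 *
 *	error = nlookup_init(&nd, path, UIO_USERSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, &fp, fmode, cmode);
 *	nlookup_done(&nd);
 */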
int
vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
{
	struct file *fp = fpp ? *fpp : NULL;
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct mount *mp;
	u_int flags;
	uint64_t osize;
	int error;
	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return (EACCES);
	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 *
		 * If the file exists but is missing write permission,
		 * nlookup() returns EACCES.  This has to be handled specially
		 * when combined with O_EXCL.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
		if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
		    (fmode & O_EXCL) && !nd->nl_dir_error) {
			error = EEXIST;
		}

		/*
		 * If no error and nd->nl_dvp is NULL, the nlookup represents
		 * a mount-point or cross-mount situation.  e.g.
		 * open("/var/cache", O_CREAT), where /var/cache is a
		 * mount point or a null-mount point.
		 */
		if (error == 0 && nd->nl_dvp == NULL)
			error = EINVAL;
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}
	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 *
	 * (error is 0 on entry / retry)
	 */
again:
	/*
	 * Checks for (likely) filesystem-modifying cases and allows
	 * the filesystem to stall the front-end.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) ||
	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
		error = ncp_writechk(&nd->nl_nch);
		if (error)
			return (error);
	}
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode;	/* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
		}
	} else {
		/*
		 * In most other cases a shared lock on the vnode is
		 * sufficient.  However, the O_RDWR case needs an
		 * exclusive lock if the vnode is executable.  The
		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
		 * this.
		 *
		 * NOTE: If NCF_NOTX is not set, we do not know the
		 *	 state of the 'x' bits and have to get
		 *	 an exclusive lock for the EXCLLOCK_IFEXEC case.
		 */
		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
		     (nd->nl_nch.ncp->nc_flag & NCF_NOTX))) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
	}
	if (error)
		return (error);
	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}

			/*
			 * Additional checks on vnode (does not substitute
			 * for ncp_writechk()).
			 */
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_unlock(&nd->nl_nch);
					cache_lock(&nd->nl_nch);
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);					/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);		/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}
	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 *	 disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}
	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}
	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).
	 *
	 * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
	 *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
	 *
	 * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
	 *	 (it will fdrop/fhold), and can also set the *fpp up however
	 *	 it wants, not necessarily using DTYPE_VNODE.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fpp);
	fp = fpp ? *fpp : NULL;
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 *
	 * NOTE: vnode stored in fp may be different
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
	}
	if (error) {
		if (vp)
			vrele(vp);
		vp = NULL;
	}
	*vpp = vp;
	return (error);
}
/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}
/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return (0);
}
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}
/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
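/*
 * Worked example (illustrative only, assumes MAXBSIZE is 65536): a read
 * of 128KB continuing at f_nextoff bumps f_seqcount by
 * howmany(131072, MAXBSIZE) = 2, and the returned value
 * (f_seqcount << IO_SEQSHIFT) is OR'd into ioflag by vn_read() and
 * vn_write() as a read-ahead/clustering hint for the filesystem.
 */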
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}
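/*
 * Illustrative sketch (not from the original source): callers bracket
 * offset-relative I/O with the get/set pair so concurrent read()ers and
 * write()ers on the same fp see a consistent f_offset:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);	(acquire + fetch)
 *	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
 *	vn_set_fpf_offset(fp, uio->uio_offset);		(store + release)
 */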
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
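/*
 * Note (added explanation, not from the original source): on x86_64 an
 * aligned 64-bit load is atomic, which is why the #if above can sample
 * f_offset directly; other platforms must take and release FOFFSETLOCK
 * to obtain a coherent snapshot.
 */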
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
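/*
 * Illustrative usage sketch (not from the original source): read the
 * first 512 bytes of a vnode into a kernel buffer, letting vn_rdwr()
 * acquire the vnode lock itself (IO_NODELOCKED not set):
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */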
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);

	VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);

	return (error);
}
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	/*
	 * vp already has a ref and is validated, can call unlocked.
	 */
	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = devid_from_dev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}
	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}
	sb->st_flags = vap->va_flags;

	error = caps_priv_check(cred, SYSCAP_NOVFS_GENERATION);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.8 release).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {			/* XXX */
			error = 0;			/* XXX */
			break;
		}
		/* fall into ... */
	default:
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}
/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail so the caller must pass
 * LK_FAILOK if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}
int
vn_relock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);
	return (error);
}
#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}
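/*
 * Illustrative usage sketch (not from the original source): temporarily
 * drop an exclusive vnode lock around code that must not hold it, then
 * restore the original state with the companion function below:
 *
 *	int vpls = vn_islocked_unlock(vp);
 *	... potentially blocking operation ...
 *	vn_islocked_relock(vp, vpls);
 */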
/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}