/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
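/*
 * Illustrative usage (a sketch, not part of the original file): a typical
 * caller brackets vn_open() with nlookup_init()/nlookup_done(), e.g. to
 * open an existing file for reading:
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, fp, FREAD, 0);
 *	nlookup_done(&nd);
 */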
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp = NULL;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error, flags;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}
	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
	}
	if (error)
		return (error);

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode; since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		fp->f_type = DTYPE_VNODE;
		fp->f_flag = fmode & FMASK;
		fp->f_ops = &vnode_fileops;
		fp->f_data = vp;
		vn_unlock(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
	}
	if (error) {
		if (vp)
			vrele(vp);
		vp = NULL;
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
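/*
 * Worked example (illustrative numbers, not from the original source):
 * with BKVASIZE of 16KB, a reader issuing back-to-back 64KB requests adds
 * (65536 + 16383) / 16384 = 4 to f_seqcount per call until it clamps at
 * IO_SEQMAX; the value handed back to the caller is then
 * f_seqcount << IO_SEQSHIFT, merged into the ioflag for VOP_READ().
 */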
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 */
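/*
 * Example pairing (a sketch mirroring vn_read()/vn_write() below): an I/O
 * path brackets the transfer with the get/set pair so that concurrent
 * callers observe a coherent f_offset:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);
 *	error = VOP_READ(vp, uio, ioflag, cred);
 *	vn_set_fpf_offset(fp, uio->uio_offset);
 */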
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way.
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
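/*
 * Illustrative call (a sketch, not in the original file): read the first
 * 512 bytes of a vnode into a kernel buffer, letting vn_rdwr() handle the
 * vnode locking and the uio packaging:
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */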
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
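/*
 * Worked example of the chunking arithmetic (assumed values, not from the
 * original source): with MAXBSIZE = 65536 and offset = 70000, the first
 * chunk is 65536 - (70000 % 65536) = 61072 bytes, which advances offset
 * to 131072; every subsequent chunk then starts on a MAXBSIZE boundary.
 */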
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	/* This is a cosmetic change, symlinks do not have a mode. */
	if (vap->va_type == VLNK) {
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
	}
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	off_t size;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {	/* XXX */
			error = 0;	/* XXX */
			break;
		}
		/* FALLTHROUGH */
	default:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}
			sess = p->p_session;

			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			if (sess->s_ttyvp)
				vrele(sess->s_ttyvp);
			sess->s_ttyvp = vp;
			vref(vp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 */
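/*
 * Example pairing (a sketch): a caller that must drop a possibly-held
 * exclusive lock around a blocking operation saves the status and
 * restores it afterwards:
 *
 *	int vpls = vn_islocked_unlock(vp);
 *	... blocking code that must not hold the vnode lock ...
 *	vn_islocked_relock(vp, vpls);
 */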
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}

static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}