2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
38 * External virtual filesystem routines
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
51 #include <sys/condvar.h>
53 #include <sys/dirent.h>
54 #include <sys/event.h>
55 #include <sys/eventhandler.h>
56 #include <sys/extattr.h>
58 #include <sys/fcntl.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/lockf.h>
64 #include <sys/malloc.h>
65 #include <sys/mount.h>
66 #include <sys/namei.h>
68 #include <sys/reboot.h>
69 #include <sys/sleepqueue.h>
71 #include <sys/sysctl.h>
72 #include <sys/syslog.h>
73 #include <sys/vmmeter.h>
74 #include <sys/vnode.h>
76 #include <machine/stdarg.h>
78 #include <security/mac/mac_framework.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_extern.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_kern.h>
96 static MALLOC_DEFINE(M_NETADDR
, "subr_export_host", "Export host address structure");
98 static void delmntque(struct vnode
*vp
);
99 static int flushbuflist(struct bufv
*bufv
, int flags
, struct bufobj
*bo
,
100 int slpflag
, int slptimeo
);
101 static void syncer_shutdown(void *arg
, int howto
);
102 static int vtryrecycle(struct vnode
*vp
);
103 static void vbusy(struct vnode
*vp
);
104 static void vinactive(struct vnode
*, struct thread
*);
105 static void v_incr_usecount(struct vnode
*);
106 static void v_decr_usecount(struct vnode
*);
107 static void v_decr_useonly(struct vnode
*);
108 static void v_upgrade_usecount(struct vnode
*);
109 static void vfree(struct vnode
*);
110 static void vnlru_free(int);
111 static void vdestroy(struct vnode
*);
112 static void vgonel(struct vnode
*);
113 static void vfs_knllock(void *arg
);
114 static void vfs_knlunlock(void *arg
);
115 static int vfs_knllocked(void *arg
);
119 * Enable Giant pushdown based on whether or not the vm is mpsafe in this
120 * build. Without mpsafevm the buffer cache can not run Giant free.
123 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs
);
124 SYSCTL_INT(_debug
, OID_AUTO
, mpsafevfs
, CTLFLAG_RD
, &mpsafe_vfs
, 0,
128 * Number of vnodes in existence. Increased whenever getnewvnode()
129 * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed
132 static unsigned long numvnodes
;
134 SYSCTL_LONG(_vfs
, OID_AUTO
, numvnodes
, CTLFLAG_RD
, &numvnodes
, 0, "");
137 * Conversion tables for conversion from vnode types to inode formats
140 enum vtype iftovt_tab
[16] = {
141 VNON
, VFIFO
, VCHR
, VNON
, VDIR
, VNON
, VBLK
, VNON
,
142 VREG
, VNON
, VLNK
, VNON
, VSOCK
, VNON
, VNON
, VBAD
,
144 int vttoif_tab
[10] = {
145 0, S_IFREG
, S_IFDIR
, S_IFBLK
, S_IFCHR
, S_IFLNK
,
146 S_IFSOCK
, S_IFIFO
, S_IFMT
, S_IFMT
150 * List of vnodes that are ready for recycling.
152 static TAILQ_HEAD(freelst
, vnode
) vnode_free_list
;
155 * Free vnode target. Free vnodes may simply be files which have been stat'd
156 * but not read. This is somewhat common, and a small cache of such files
157 * should be kept to avoid recreation costs.
159 static u_long wantfreevnodes
;
160 SYSCTL_LONG(_vfs
, OID_AUTO
, wantfreevnodes
, CTLFLAG_RW
, &wantfreevnodes
, 0, "");
161 /* Number of vnodes in the free list. */
162 static u_long freevnodes
;
163 SYSCTL_LONG(_vfs
, OID_AUTO
, freevnodes
, CTLFLAG_RD
, &freevnodes
, 0, "");
166 * Various variables used for debugging the new implementation of
168 * XXX these are probably of (very) limited utility now.
170 static int reassignbufcalls
;
171 SYSCTL_INT(_vfs
, OID_AUTO
, reassignbufcalls
, CTLFLAG_RW
, &reassignbufcalls
, 0, "");
174 * Cache for the mount type id assigned to NFS. This is used for
175 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
177 int nfs_mount_type
= -1;
179 /* To keep more than one thread at a time from running vfs_getnewfsid */
180 static struct mtx mntid_mtx
;
183 * Lock for any access to the following:
188 static struct mtx vnode_free_list_mtx
;
190 /* Publicly exported FS */
191 struct nfs_public nfs_pub
;
193 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
194 static uma_zone_t vnode_zone
;
195 static uma_zone_t vnodepoll_zone
;
197 /* Set to 1 to print out reclaim of active vnodes */
201 * The workitem queue.
203 * It is useful to delay writes of file data and filesystem metadata
204 * for tens of seconds so that quickly created and deleted files need
205 * not waste disk bandwidth being created and removed. To realize this,
206 * we append vnodes to a "workitem" queue. When running with a soft
207 * updates implementation, most pending metadata dependencies should
208 * not wait for more than a few seconds. Thus, mounted on block devices
209 * are delayed only about a half the time that file data is delayed.
210 * Similarly, directory updates are more critical, so are only delayed
211 * about a third the time that file data is delayed. Thus, there are
212 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
213 * one each second (driven off the filesystem syncer process). The
214 * syncer_delayno variable indicates the next queue that is to be processed.
215 * Items that need to be processed soon are placed in this queue:
217 * syncer_workitem_pending[syncer_delayno]
219 * A delay of fifteen seconds is done by placing the request fifteen
220 * entries later in the queue:
222 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
225 static int syncer_delayno
;
226 static long syncer_mask
;
227 LIST_HEAD(synclist
, bufobj
);
228 static struct synclist
*syncer_workitem_pending
[2];
230 * The sync_mtx protects:
235 * syncer_workitem_pending
236 * syncer_worklist_len
239 static struct mtx sync_mtx
;
240 static struct cv sync_wakeup
;
242 #define SYNCER_MAXDELAY 32
243 static int syncer_maxdelay
= SYNCER_MAXDELAY
; /* maximum delay time */
244 static int syncdelay
= 30; /* max time to delay syncing data */
245 static int filedelay
= 30; /* time to delay syncing files */
246 SYSCTL_INT(_kern
, OID_AUTO
, filedelay
, CTLFLAG_RW
, &filedelay
, 0, "");
247 static int dirdelay
= 29; /* time to delay syncing directories */
248 SYSCTL_INT(_kern
, OID_AUTO
, dirdelay
, CTLFLAG_RW
, &dirdelay
, 0, "");
249 static int metadelay
= 28; /* time to delay syncing metadata */
250 SYSCTL_INT(_kern
, OID_AUTO
, metadelay
, CTLFLAG_RW
, &metadelay
, 0, "");
251 static int rushjob
; /* number of slots to run ASAP */
252 static int stat_rush_requests
; /* number of times I/O speeded up */
253 SYSCTL_INT(_debug
, OID_AUTO
, rush_requests
, CTLFLAG_RW
, &stat_rush_requests
, 0, "");
256 * When shutting down the syncer, run it at four times normal speed.
258 #define SYNCER_SHUTDOWN_SPEEDUP 4
259 static int sync_vnode_count
;
260 static int syncer_worklist_len
;
261 static enum { SYNCER_RUNNING
, SYNCER_SHUTTING_DOWN
, SYNCER_FINAL_DELAY
}
265 * Number of vnodes we want to exist at any one time. This is mostly used
266 * to size hash tables in vnode-related code. It is normally not used in
267 * getnewvnode(), as wantfreevnodes is normally nonzero.)
269 * XXX desiredvnodes is historical cruft and should not exist.
272 SYSCTL_INT(_kern
, KERN_MAXVNODES
, maxvnodes
, CTLFLAG_RW
,
273 &desiredvnodes
, 0, "Maximum number of vnodes");
274 SYSCTL_INT(_kern
, OID_AUTO
, minvnodes
, CTLFLAG_RW
,
275 &wantfreevnodes
, 0, "Minimum number of vnodes (legacy)");
276 static int vnlru_nowhere
;
277 SYSCTL_INT(_debug
, OID_AUTO
, vnlru_nowhere
, CTLFLAG_RW
,
278 &vnlru_nowhere
, 0, "Number of times the vnlru process ran without success");
281 * Macros to control when a vnode is freed and recycled. All require
282 * the vnode interlock.
284 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
285 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
286 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
290 * Initialize the vnode management data structures.
292 #ifndef MAXVNODES_MAX
293 #define MAXVNODES_MAX 100000
296 vntblinit(void *dummy __unused
)
300 * Desiredvnodes is a function of the physical memory size and
301 * the kernel's heap size. Specifically, desiredvnodes scales
302 * in proportion to the physical memory size until two fifths
303 * of the kernel's heap size is consumed by vnodes and vm
306 desiredvnodes
= min(maxproc
+ cnt
.v_page_count
/ 4, 2 * vm_kmem_size
/
307 (5 * (sizeof(struct vm_object
) + sizeof(struct vnode
))));
308 if (desiredvnodes
> MAXVNODES_MAX
) {
310 printf("Reducing kern.maxvnodes %d -> %d\n",
311 desiredvnodes
, MAXVNODES_MAX
);
312 desiredvnodes
= MAXVNODES_MAX
;
314 wantfreevnodes
= desiredvnodes
/ 4;
315 mtx_init(&mntid_mtx
, "mntid", NULL
, MTX_DEF
);
316 TAILQ_INIT(&vnode_free_list
);
317 mtx_init(&vnode_free_list_mtx
, "vnode_free_list", NULL
, MTX_DEF
);
318 vnode_zone
= uma_zcreate("VNODE", sizeof (struct vnode
), NULL
, NULL
,
319 NULL
, NULL
, UMA_ALIGN_PTR
, 0);
320 vnodepoll_zone
= uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo
),
321 NULL
, NULL
, NULL
, NULL
, UMA_ALIGN_PTR
, 0);
323 * Initialize the filesystem syncer.
325 syncer_workitem_pending
[WI_MPSAFEQ
] = hashinit(syncer_maxdelay
, M_VNODE
,
327 syncer_workitem_pending
[WI_GIANTQ
] = hashinit(syncer_maxdelay
, M_VNODE
,
329 syncer_maxdelay
= syncer_mask
+ 1;
330 mtx_init(&sync_mtx
, "Syncer mtx", NULL
, MTX_DEF
);
331 cv_init(&sync_wakeup
, "syncer");
333 SYSINIT(vfs
, SI_SUB_VFS
, SI_ORDER_FIRST
, vntblinit
, NULL
);
337 * Mark a mount point as busy. Used to synchronize access and to delay
338 * unmounting. Interlock is not released on failure.
341 vfs_busy(struct mount
*mp
, int flags
, struct mtx
*interlkp
)
347 if (mp
->mnt_kern_flag
& MNTK_UNMOUNT
) {
348 if (flags
& LK_NOWAIT
) {
354 mtx_unlock(interlkp
);
355 mp
->mnt_kern_flag
|= MNTK_MWAIT
;
357 * Since all busy locks are shared except the exclusive
358 * lock granted when unmounting, the only place that a
359 * wakeup needs to be done is at the release of the
360 * exclusive lock at the end of dounmount.
362 msleep(mp
, MNT_MTX(mp
), PVFS
, "vfs_busy", 0);
370 mtx_unlock(interlkp
);
371 lkflags
= LK_SHARED
| LK_INTERLOCK
;
372 if (lockmgr(&mp
->mnt_lock
, lkflags
, MNT_MTX(mp
)))
373 panic("vfs_busy: unexpected lock failure");
378 * Free a busy filesystem.
381 vfs_unbusy(struct mount
*mp
)
384 lockmgr(&mp
->mnt_lock
, LK_RELEASE
, NULL
);
389 * Lookup a mount point by filesystem identifier.
392 vfs_getvfs(fsid_t
*fsid
)
396 mtx_lock(&mountlist_mtx
);
397 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
398 if (mp
->mnt_stat
.f_fsid
.val
[0] == fsid
->val
[0] &&
399 mp
->mnt_stat
.f_fsid
.val
[1] == fsid
->val
[1]) {
401 mtx_unlock(&mountlist_mtx
);
405 mtx_unlock(&mountlist_mtx
);
406 return ((struct mount
*) 0);
410 * Check if a user can access privileged mount options.
413 vfs_suser(struct mount
*mp
, struct thread
*td
)
418 * If the thread is jailed, but this is not a jail-friendly file
419 * system, deny immediately.
421 if (jailed(td
->td_ucred
) && !(mp
->mnt_vfc
->vfc_flags
& VFCF_JAIL
))
425 * If the file system was mounted outside a jail and a jailed thread
426 * tries to access it, deny immediately.
428 if (!jailed(mp
->mnt_cred
) && jailed(td
->td_ucred
))
432 * If the file system was mounted inside different jail that the jail of
433 * the calling thread, deny immediately.
435 if (jailed(mp
->mnt_cred
) && jailed(td
->td_ucred
) &&
436 mp
->mnt_cred
->cr_prison
!= td
->td_ucred
->cr_prison
) {
440 if ((mp
->mnt_flag
& MNT_USER
) == 0 ||
441 mp
->mnt_cred
->cr_uid
!= td
->td_ucred
->cr_uid
) {
442 if ((error
= priv_check(td
, PRIV_VFS_MOUNT_OWNER
)) != 0)
449 * Get a new unique fsid. Try to make its val[0] unique, since this value
450 * will be used to create fake device numbers for stat(). Also try (but
451 * not so hard) make its val[0] unique mod 2^16, since some emulators only
452 * support 16-bit device numbers. We end up with unique val[0]'s for the
453 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
455 * Keep in mind that several mounts may be running in parallel. Starting
456 * the search one past where the previous search terminated is both a
457 * micro-optimization and a defense against returning the same fsid to
461 vfs_getnewfsid(struct mount
*mp
)
463 static u_int16_t mntid_base
;
468 mtx_lock(&mntid_mtx
);
469 mtype
= mp
->mnt_vfc
->vfc_typenum
;
470 tfsid
.val
[1] = mtype
;
471 mtype
= (mtype
& 0xFF) << 24;
473 tfsid
.val
[0] = makedev(255,
474 mtype
| ((mntid_base
& 0xFF00) << 8) | (mntid_base
& 0xFF));
476 if ((nmp
= vfs_getvfs(&tfsid
)) == NULL
)
480 mp
->mnt_stat
.f_fsid
.val
[0] = tfsid
.val
[0];
481 mp
->mnt_stat
.f_fsid
.val
[1] = tfsid
.val
[1];
482 mtx_unlock(&mntid_mtx
);
486 * Knob to control the precision of file timestamps:
488 * 0 = seconds only; nanoseconds zeroed.
489 * 1 = seconds and nanoseconds, accurate within 1/HZ.
490 * 2 = seconds and nanoseconds, truncated to microseconds.
491 * >=3 = seconds and nanoseconds, maximum precision.
493 enum { TSP_SEC
, TSP_HZ
, TSP_USEC
, TSP_NSEC
};
495 static int timestamp_precision
= TSP_SEC
;
496 SYSCTL_INT(_vfs
, OID_AUTO
, timestamp_precision
, CTLFLAG_RW
,
497 ×tamp_precision
, 0, "");
500 * Get a current timestamp.
503 vfs_timestamp(struct timespec
*tsp
)
507 switch (timestamp_precision
) {
509 tsp
->tv_sec
= time_second
;
517 TIMEVAL_TO_TIMESPEC(&tv
, tsp
);
527 * Set vnode attributes to VNOVAL
530 vattr_null(struct vattr
*vap
)
534 vap
->va_size
= VNOVAL
;
535 vap
->va_bytes
= VNOVAL
;
536 vap
->va_mode
= VNOVAL
;
537 vap
->va_nlink
= VNOVAL
;
538 vap
->va_uid
= VNOVAL
;
539 vap
->va_gid
= VNOVAL
;
540 vap
->va_fsid
= VNOVAL
;
541 vap
->va_fileid
= VNOVAL
;
542 vap
->va_blocksize
= VNOVAL
;
543 vap
->va_rdev
= VNOVAL
;
544 vap
->va_atime
.tv_sec
= VNOVAL
;
545 vap
->va_atime
.tv_nsec
= VNOVAL
;
546 vap
->va_mtime
.tv_sec
= VNOVAL
;
547 vap
->va_mtime
.tv_nsec
= VNOVAL
;
548 vap
->va_ctime
.tv_sec
= VNOVAL
;
549 vap
->va_ctime
.tv_nsec
= VNOVAL
;
550 vap
->va_birthtime
.tv_sec
= VNOVAL
;
551 vap
->va_birthtime
.tv_nsec
= VNOVAL
;
552 vap
->va_flags
= VNOVAL
;
553 vap
->va_gen
= VNOVAL
;
558 * This routine is called when we have too many vnodes. It attempts
559 * to free <count> vnodes and will potentially free vnodes that still
560 * have VM backing store (VM backing store is typically the cause
561 * of a vnode blowout so we want to do this). Therefore, this operation
562 * is not considered cheap.
564 * A number of conditions may prevent a vnode from being reclaimed.
565 * the buffer cache may have references on the vnode, a directory
566 * vnode may still have references due to the namei cache representing
567 * underlying files, or the vnode may be in active use. It is not
568 * desireable to reuse such vnodes. These conditions may cause the
569 * number of vnodes to reach some minimum value regardless of what
570 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
573 vlrureclaim(struct mount
*mp
)
582 * Calculate the trigger point, don't allow user
583 * screwups to blow us up. This prevents us from
584 * recycling vnodes with lots of resident pages. We
585 * aren't trying to free memory, we are trying to
588 usevnodes
= desiredvnodes
;
591 trigger
= cnt
.v_page_count
* 2 / usevnodes
;
593 vn_start_write(NULL
, &mp
, V_WAIT
);
595 count
= mp
->mnt_nvnodelistsize
/ 10 + 1;
597 vp
= TAILQ_FIRST(&mp
->mnt_nvnodelist
);
598 while (vp
!= NULL
&& vp
->v_type
== VMARKER
)
599 vp
= TAILQ_NEXT(vp
, v_nmntvnodes
);
602 TAILQ_REMOVE(&mp
->mnt_nvnodelist
, vp
, v_nmntvnodes
);
603 TAILQ_INSERT_TAIL(&mp
->mnt_nvnodelist
, vp
, v_nmntvnodes
);
608 * If it's been deconstructed already, it's still
609 * referenced, or it exceeds the trigger, skip it.
611 if (vp
->v_usecount
|| !LIST_EMPTY(&(vp
)->v_cache_src
) ||
612 (vp
->v_iflag
& VI_DOOMED
) != 0 || (vp
->v_object
!= NULL
&&
613 vp
->v_object
->resident_page_count
> trigger
)) {
619 if (VOP_LOCK(vp
, LK_INTERLOCK
|LK_EXCLUSIVE
|LK_NOWAIT
)) {
621 goto next_iter_mntunlocked
;
625 * v_usecount may have been bumped after VOP_LOCK() dropped
626 * the vnode interlock and before it was locked again.
628 * It is not necessary to recheck VI_DOOMED because it can
629 * only be set by another thread that holds both the vnode
630 * lock and vnode interlock. If another thread has the
631 * vnode lock before we get to VOP_LOCK() and obtains the
632 * vnode interlock after VOP_LOCK() drops the vnode
633 * interlock, the other thread will be unable to drop the
634 * vnode lock before our VOP_LOCK() call fails.
636 if (vp
->v_usecount
|| !LIST_EMPTY(&(vp
)->v_cache_src
) ||
637 (vp
->v_object
!= NULL
&&
638 vp
->v_object
->resident_page_count
> trigger
)) {
639 VOP_UNLOCK(vp
, LK_INTERLOCK
);
640 goto next_iter_mntunlocked
;
642 KASSERT((vp
->v_iflag
& VI_DOOMED
) == 0,
643 ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
648 next_iter_mntunlocked
:
649 if ((count
% 256) != 0)
653 if ((count
% 256) != 0)
662 vn_finished_write(mp
);
667 * Attempt to keep the free list at wantfreevnodes length.
670 vnlru_free(int count
)
675 mtx_assert(&vnode_free_list_mtx
, MA_OWNED
);
676 for (; count
> 0; count
--) {
677 vp
= TAILQ_FIRST(&vnode_free_list
);
679 * The list can be modified while the free_list_mtx
680 * has been dropped and vp could be NULL here.
684 VNASSERT(vp
->v_op
!= NULL
, vp
,
685 ("vnlru_free: vnode already reclaimed."));
686 TAILQ_REMOVE(&vnode_free_list
, vp
, v_freelist
);
688 * Don't recycle if we can't get the interlock.
690 if (!VI_TRYLOCK(vp
)) {
691 TAILQ_INSERT_TAIL(&vnode_free_list
, vp
, v_freelist
);
694 VNASSERT(VCANRECYCLE(vp
), vp
,
695 ("vp inconsistent on freelist"));
697 vp
->v_iflag
&= ~VI_FREE
;
699 mtx_unlock(&vnode_free_list_mtx
);
701 vfslocked
= VFS_LOCK_GIANT(vp
->v_mount
);
703 VFS_UNLOCK_GIANT(vfslocked
);
705 * If the recycled succeeded this vdrop will actually free
706 * the vnode. If not it will simply place it back on
710 mtx_lock(&vnode_free_list_mtx
);
714 * Attempt to recycle vnodes in a context that is always safe to block.
715 * Calling vlrurecycle() from the bowels of filesystem code has some
716 * interesting deadlock problems.
718 static struct proc
*vnlruproc
;
719 static int vnlruproc_sig
;
724 struct mount
*mp
, *nmp
;
726 struct proc
*p
= vnlruproc
;
728 EVENTHANDLER_REGISTER(shutdown_pre_sync
, kproc_shutdown
, p
,
734 kproc_suspend_check(p
);
735 mtx_lock(&vnode_free_list_mtx
);
736 if (freevnodes
> wantfreevnodes
)
737 vnlru_free(freevnodes
- wantfreevnodes
);
738 if (numvnodes
<= desiredvnodes
* 9 / 10) {
740 wakeup(&vnlruproc_sig
);
741 msleep(vnlruproc
, &vnode_free_list_mtx
,
742 PVFS
|PDROP
, "vlruwt", hz
);
745 mtx_unlock(&vnode_free_list_mtx
);
747 mtx_lock(&mountlist_mtx
);
748 for (mp
= TAILQ_FIRST(&mountlist
); mp
!= NULL
; mp
= nmp
) {
750 if (vfs_busy(mp
, LK_NOWAIT
, &mountlist_mtx
)) {
751 nmp
= TAILQ_NEXT(mp
, mnt_list
);
754 if (!VFS_NEEDSGIANT(mp
)) {
759 done
+= vlrureclaim(mp
);
762 mtx_lock(&mountlist_mtx
);
763 nmp
= TAILQ_NEXT(mp
, mnt_list
);
766 mtx_unlock(&mountlist_mtx
);
768 EVENTHANDLER_INVOKE(vfs_lowvnodes
, desiredvnodes
/ 10);
770 /* These messages are temporary debugging aids */
771 if (vnlru_nowhere
< 5)
772 printf("vnlru process getting nowhere..\n");
773 else if (vnlru_nowhere
== 5)
774 printf("vnlru process messages stopped.\n");
777 tsleep(vnlruproc
, PPAUSE
, "vlrup", hz
* 3);
783 static struct kproc_desc vnlru_kp
= {
788 SYSINIT(vnlru
, SI_SUB_KTHREAD_UPDATE
, SI_ORDER_FIRST
, kproc_start
,
792 * Routines having to do with the management of the vnode table.
796 vdestroy(struct vnode
*vp
)
800 CTR1(KTR_VFS
, "vdestroy vp %p", vp
);
801 mtx_lock(&vnode_free_list_mtx
);
803 mtx_unlock(&vnode_free_list_mtx
);
805 VNASSERT((vp
->v_iflag
& VI_FREE
) == 0, vp
,
806 ("cleaned vnode still on the free list."));
807 VNASSERT(vp
->v_data
== NULL
, vp
, ("cleaned vnode isn't"));
808 VNASSERT(vp
->v_holdcnt
== 0, vp
, ("Non-zero hold count"));
809 VNASSERT(vp
->v_usecount
== 0, vp
, ("Non-zero use count"));
810 VNASSERT(vp
->v_writecount
== 0, vp
, ("Non-zero write count"));
811 VNASSERT(bo
->bo_numoutput
== 0, vp
, ("Clean vnode has pending I/O's"));
812 VNASSERT(bo
->bo_clean
.bv_cnt
== 0, vp
, ("cleanbufcnt not 0"));
813 VNASSERT(bo
->bo_clean
.bv_root
== NULL
, vp
, ("cleanblkroot not NULL"));
814 VNASSERT(bo
->bo_dirty
.bv_cnt
== 0, vp
, ("dirtybufcnt not 0"));
815 VNASSERT(bo
->bo_dirty
.bv_root
== NULL
, vp
, ("dirtyblkroot not NULL"));
816 VNASSERT(TAILQ_EMPTY(&vp
->v_cache_dst
), vp
, ("vp has namecache dst"));
817 VNASSERT(LIST_EMPTY(&vp
->v_cache_src
), vp
, ("vp has namecache src"));
820 mac_vnode_destroy(vp
);
822 if (vp
->v_pollinfo
!= NULL
) {
823 knlist_destroy(&vp
->v_pollinfo
->vpi_selinfo
.si_note
);
824 mtx_destroy(&vp
->v_pollinfo
->vpi_lock
);
825 uma_zfree(vnodepoll_zone
, vp
->v_pollinfo
);
828 /* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
831 lockdestroy(vp
->v_vnlock
);
832 mtx_destroy(&vp
->v_interlock
);
833 mtx_destroy(BO_MTX(bo
));
834 uma_zfree(vnode_zone
, vp
);
838 * Try to recycle a freed vnode. We abort if anyone picks up a reference
839 * before we actually vgone(). This function must be called with the vnode
840 * held to prevent the vnode from being returned to the free list midway
844 vtryrecycle(struct vnode
*vp
)
848 CTR1(KTR_VFS
, "vtryrecycle: trying vp %p", vp
);
849 VNASSERT(vp
->v_holdcnt
, vp
,
850 ("vtryrecycle: Recycling vp %p without a reference.", vp
));
852 * This vnode may found and locked via some other list, if so we
853 * can't recycle it yet.
855 if (VOP_LOCK(vp
, LK_EXCLUSIVE
| LK_NOWAIT
) != 0)
856 return (EWOULDBLOCK
);
858 * Don't recycle if its filesystem is being suspended.
860 if (vn_start_write(vp
, &vnmp
, V_NOWAIT
) != 0) {
865 * If we got this far, we need to acquire the interlock and see if
866 * anyone picked up this vnode from another list. If not, we will
867 * mark it with DOOMED via vgonel() so that anyone who does find it
871 if (vp
->v_usecount
) {
872 VOP_UNLOCK(vp
, LK_INTERLOCK
);
873 vn_finished_write(vnmp
);
876 if ((vp
->v_iflag
& VI_DOOMED
) == 0)
878 VOP_UNLOCK(vp
, LK_INTERLOCK
);
879 vn_finished_write(vnmp
);
880 CTR1(KTR_VFS
, "vtryrecycle: recycled vp %p", vp
);
885 * Return the next vnode from the free list.
888 getnewvnode(const char *tag
, struct mount
*mp
, struct vop_vector
*vops
,
891 struct vnode
*vp
= NULL
;
894 mtx_lock(&vnode_free_list_mtx
);
896 * Lend our context to reclaim vnodes if they've exceeded the max.
898 if (freevnodes
> wantfreevnodes
)
901 * Wait for available vnodes.
903 if (numvnodes
> desiredvnodes
) {
904 if (mp
!= NULL
&& (mp
->mnt_kern_flag
& MNTK_SUSPEND
)) {
906 * File system is beeing suspended, we cannot risk a
907 * deadlock here, so allocate new vnode anyway.
909 if (freevnodes
> wantfreevnodes
)
910 vnlru_free(freevnodes
- wantfreevnodes
);
913 if (vnlruproc_sig
== 0) {
914 vnlruproc_sig
= 1; /* avoid unnecessary wakeups */
917 msleep(&vnlruproc_sig
, &vnode_free_list_mtx
, PVFS
,
919 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
920 if (numvnodes
> desiredvnodes
) {
921 mtx_unlock(&vnode_free_list_mtx
);
928 mtx_unlock(&vnode_free_list_mtx
);
929 vp
= (struct vnode
*) uma_zalloc(vnode_zone
, M_WAITOK
|M_ZERO
);
933 vp
->v_vnlock
= &vp
->v_lock
;
934 mtx_init(&vp
->v_interlock
, "vnode interlock", NULL
, MTX_DEF
);
936 * By default, don't allow shared locks unless filesystems
939 lockinit(vp
->v_vnlock
, PVFS
, tag
, VLKTIMEOUT
, LK_NOSHARE
);
945 mtx_init(BO_MTX(bo
), "bufobj interlock", NULL
, MTX_DEF
);
946 bo
->bo_ops
= &buf_ops_bio
;
948 TAILQ_INIT(&bo
->bo_clean
.bv_hd
);
949 TAILQ_INIT(&bo
->bo_dirty
.bv_hd
);
951 * Initialize namecache.
953 LIST_INIT(&vp
->v_cache_src
);
954 TAILQ_INIT(&vp
->v_cache_dst
);
956 * Finalize various vnode identity bits.
965 if (mp
!= NULL
&& (mp
->mnt_flag
& MNT_MULTILABEL
) == 0)
966 mac_vnode_associate_singlelabel(mp
, vp
);
967 else if (mp
== NULL
&& vops
!= &dead_vnodeops
)
968 printf("NULL mp in getnewvnode()\n");
971 bo
->bo_bsize
= mp
->mnt_stat
.f_iosize
;
972 if ((mp
->mnt_kern_flag
& MNTK_NOKNOTE
) != 0)
973 vp
->v_vflag
|= VV_NOKNOTE
;
976 CTR2(KTR_VFS
, "getnewvnode: mp %p vp %p", mp
, vp
);
982 * Delete from old mount point vnode list, if on one.
985 delmntque(struct vnode
*vp
)
994 VNASSERT(mp
->mnt_nvnodelistsize
> 0, vp
,
995 ("bad mount point vnode list size"));
996 TAILQ_REMOVE(&mp
->mnt_nvnodelist
, vp
, v_nmntvnodes
);
997 mp
->mnt_nvnodelistsize
--;
1003 insmntque_stddtr(struct vnode
*vp
, void *dtr_arg
)
1007 vp
->v_op
= &dead_vnodeops
;
1008 /* XXX non mp-safe fs may still call insmntque with vnode
1010 if (!VOP_ISLOCKED(vp
))
1011 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
);
1017 * Insert into list of vnodes for the new mount point, if available.
1020 insmntque1(struct vnode
*vp
, struct mount
*mp
,
1021 void (*dtr
)(struct vnode
*, void *), void *dtr_arg
)
1025 KASSERT(vp
->v_mount
== NULL
,
1026 ("insmntque: vnode already on per mount vnode list"));
1027 VNASSERT(mp
!= NULL
, vp
, ("Don't call insmntque(foo, NULL)"));
1028 #ifdef DEBUG_VFS_LOCKS
1029 if (!VFS_NEEDSGIANT(mp
))
1030 ASSERT_VOP_ELOCKED(vp
,
1031 "insmntque: mp-safe fs and non-locked vp");
1034 if ((mp
->mnt_kern_flag
& MNTK_NOINSMNTQ
) != 0 &&
1035 ((mp
->mnt_kern_flag
& MNTK_UNMOUNTF
) != 0 ||
1036 mp
->mnt_nvnodelistsize
== 0)) {
1037 locked
= VOP_ISLOCKED(vp
);
1038 if (!locked
|| (locked
== LK_EXCLUSIVE
&&
1039 (vp
->v_vflag
& VV_FORCEINSMQ
) == 0)) {
1048 TAILQ_INSERT_TAIL(&mp
->mnt_nvnodelist
, vp
, v_nmntvnodes
);
1049 VNASSERT(mp
->mnt_nvnodelistsize
>= 0, vp
,
1050 ("neg mount point vnode list size"));
1051 mp
->mnt_nvnodelistsize
++;
1057 insmntque(struct vnode
*vp
, struct mount
*mp
)
1060 return (insmntque1(vp
, mp
, insmntque_stddtr
, NULL
));
1064 * Flush out and invalidate all buffers associated with a bufobj
1065 * Called with the underlying object locked.
1068 bufobj_invalbuf(struct bufobj
*bo
, int flags
, struct thread
*td
, int slpflag
,
1074 if (flags
& V_SAVE
) {
1075 error
= bufobj_wwait(bo
, slpflag
, slptimeo
);
1080 if (bo
->bo_dirty
.bv_cnt
> 0) {
1082 if ((error
= BO_SYNC(bo
, MNT_WAIT
, td
)) != 0)
1085 * XXX We could save a lock/unlock if this was only
1086 * enabled under INVARIANTS
1089 if (bo
->bo_numoutput
> 0 || bo
->bo_dirty
.bv_cnt
> 0)
1090 panic("vinvalbuf: dirty bufs");
1094 * If you alter this loop please notice that interlock is dropped and
1095 * reacquired in flushbuflist. Special care is needed to ensure that
1096 * no race conditions occur from this.
1099 error
= flushbuflist(&bo
->bo_clean
,
1100 flags
, bo
, slpflag
, slptimeo
);
1102 error
= flushbuflist(&bo
->bo_dirty
,
1103 flags
, bo
, slpflag
, slptimeo
);
1104 if (error
!= 0 && error
!= EAGAIN
) {
1108 } while (error
!= 0);
1111 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1112 * have write I/O in-progress but if there is a VM object then the
1113 * VM object can also have read-I/O in-progress.
1116 bufobj_wwait(bo
, 0, 0);
1118 if (bo
->bo_object
!= NULL
) {
1119 VM_OBJECT_LOCK(bo
->bo_object
);
1120 vm_object_pip_wait(bo
->bo_object
, "bovlbx");
1121 VM_OBJECT_UNLOCK(bo
->bo_object
);
1124 } while (bo
->bo_numoutput
> 0);
1128 * Destroy the copy in the VM cache, too.
1130 if (bo
->bo_object
!= NULL
) {
1131 VM_OBJECT_LOCK(bo
->bo_object
);
1132 vm_object_page_remove(bo
->bo_object
, 0, 0,
1133 (flags
& V_SAVE
) ? TRUE
: FALSE
);
1134 VM_OBJECT_UNLOCK(bo
->bo_object
);
1139 if ((flags
& (V_ALT
| V_NORMAL
)) == 0 &&
1140 (bo
->bo_dirty
.bv_cnt
> 0 || bo
->bo_clean
.bv_cnt
> 0))
1141 panic("vinvalbuf: flush failed");
1148 * Flush out and invalidate all buffers associated with a vnode.
1149 * Called with the underlying object locked.
1152 vinvalbuf(struct vnode
*vp
, int flags
, struct thread
*td
, int slpflag
,
1156 CTR2(KTR_VFS
, "vinvalbuf vp %p flags %d", vp
, flags
);
1157 ASSERT_VOP_LOCKED(vp
, "vinvalbuf");
1158 return (bufobj_invalbuf(&vp
->v_bufobj
, flags
, td
, slpflag
, slptimeo
));
1162 * Flush out buffers on the specified list.
1166 flushbuflist( struct bufv
*bufv
, int flags
, struct bufobj
*bo
, int slpflag
,
1169 struct buf
*bp
, *nbp
;
1174 ASSERT_BO_LOCKED(bo
);
1177 TAILQ_FOREACH_SAFE(bp
, &bufv
->bv_hd
, b_bobufs
, nbp
) {
1178 if (((flags
& V_NORMAL
) && (bp
->b_xflags
& BX_ALTDATA
)) ||
1179 ((flags
& V_ALT
) && (bp
->b_xflags
& BX_ALTDATA
) == 0)) {
1185 lblkno
= nbp
->b_lblkno
;
1186 xflags
= nbp
->b_xflags
&
1187 (BX_BKGRDMARKER
| BX_VNDIRTY
| BX_VNCLEAN
);
1190 error
= BUF_TIMELOCK(bp
,
1191 LK_EXCLUSIVE
| LK_SLEEPFAIL
| LK_INTERLOCK
, BO_MTX(bo
),
1192 "flushbuf", slpflag
, slptimeo
);
1195 return (error
!= ENOLCK
? error
: EAGAIN
);
1197 KASSERT(bp
->b_bufobj
== bo
,
1198 ("bp %p wrong b_bufobj %p should be %p",
1199 bp
, bp
->b_bufobj
, bo
));
1200 if (bp
->b_bufobj
!= bo
) { /* XXX: necessary ? */
1206 * XXX Since there are no node locks for NFS, I
1207 * believe there is a slight chance that a delayed
1208 * write will occur while sleeping just above, so
1211 if (((bp
->b_flags
& (B_DELWRI
| B_INVAL
)) == B_DELWRI
) &&
1214 bp
->b_flags
|= B_ASYNC
;
1217 return (EAGAIN
); /* XXX: why not loop ? */
1220 bp
->b_flags
|= (B_INVAL
| B_RELBUF
);
1221 bp
->b_flags
&= ~B_ASYNC
;
1225 (nbp
->b_bufobj
!= bo
||
1226 nbp
->b_lblkno
!= lblkno
||
1228 (BX_BKGRDMARKER
| BX_VNDIRTY
| BX_VNCLEAN
)) != xflags
))
1229 break; /* nbp invalid */
1235 * Truncate a file's buffer and pages to a specified length. This
1236 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1240 vtruncbuf(struct vnode
*vp
, struct ucred
*cred
, struct thread
*td
,
1241 off_t length
, int blksize
)
1243 struct buf
*bp
, *nbp
;
1248 CTR2(KTR_VFS
, "vtruncbuf vp %p length %jd", vp
, length
);
1250 * Round up to the *next* lbn.
1252 trunclbn
= (length
+ blksize
- 1) / blksize
;
1254 ASSERT_VOP_LOCKED(vp
, "vtruncbuf");
1261 TAILQ_FOREACH_SAFE(bp
, &bo
->bo_clean
.bv_hd
, b_bobufs
, nbp
) {
1262 if (bp
->b_lblkno
< trunclbn
)
1265 LK_EXCLUSIVE
| LK_SLEEPFAIL
| LK_INTERLOCK
,
1266 BO_MTX(bo
)) == ENOLCK
)
1270 bp
->b_flags
|= (B_INVAL
| B_RELBUF
);
1271 bp
->b_flags
&= ~B_ASYNC
;
1276 (((nbp
->b_xflags
& BX_VNCLEAN
) == 0) ||
1277 (nbp
->b_vp
!= vp
) ||
1278 (nbp
->b_flags
& B_DELWRI
))) {
1284 TAILQ_FOREACH_SAFE(bp
, &bo
->bo_dirty
.bv_hd
, b_bobufs
, nbp
) {
1285 if (bp
->b_lblkno
< trunclbn
)
1288 LK_EXCLUSIVE
| LK_SLEEPFAIL
| LK_INTERLOCK
,
1289 BO_MTX(bo
)) == ENOLCK
)
1292 bp
->b_flags
|= (B_INVAL
| B_RELBUF
);
1293 bp
->b_flags
&= ~B_ASYNC
;
1297 (((nbp
->b_xflags
& BX_VNDIRTY
) == 0) ||
1298 (nbp
->b_vp
!= vp
) ||
1299 (nbp
->b_flags
& B_DELWRI
) == 0)) {
1308 TAILQ_FOREACH_SAFE(bp
, &bo
->bo_dirty
.bv_hd
, b_bobufs
, nbp
) {
1309 if (bp
->b_lblkno
> 0)
1312 * Since we hold the vnode lock this should only
1313 * fail if we're racing with the buf daemon.
1316 LK_EXCLUSIVE
| LK_SLEEPFAIL
| LK_INTERLOCK
,
1317 BO_MTX(bo
)) == ENOLCK
) {
1320 VNASSERT((bp
->b_flags
& B_DELWRI
), vp
,
1321 ("buf(%p) on dirty queue without DELWRI", bp
));
1330 bufobj_wwait(bo
, 0, 0);
1332 vnode_pager_setsize(vp
, length
);
1338 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1341 * NOTE: We have to deal with the special case of a background bitmap
1342 * buffer, a situation where two buffers will have the same logical
1343 * block offset. We want (1) only the foreground buffer to be accessed
1344 * in a lookup and (2) must differentiate between the foreground and
1345 * background buffer in the splay tree algorithm because the splay
1346 * tree cannot normally handle multiple entities with the same 'index'.
1347 * We accomplish this by adding differentiating flags to the splay tree's
1352 buf_splay(daddr_t lblkno
, b_xflags_t xflags
, struct buf
*root
)
1355 struct buf
*lefttreemax
, *righttreemin
, *y
;
1359 lefttreemax
= righttreemin
= &dummy
;
1361 if (lblkno
< root
->b_lblkno
||
1362 (lblkno
== root
->b_lblkno
&&
1363 (xflags
& BX_BKGRDMARKER
) < (root
->b_xflags
& BX_BKGRDMARKER
))) {
1364 if ((y
= root
->b_left
) == NULL
)
1366 if (lblkno
< y
->b_lblkno
) {
1368 root
->b_left
= y
->b_right
;
1371 if ((y
= root
->b_left
) == NULL
)
1374 /* Link into the new root's right tree. */
1375 righttreemin
->b_left
= root
;
1376 righttreemin
= root
;
1377 } else if (lblkno
> root
->b_lblkno
||
1378 (lblkno
== root
->b_lblkno
&&
1379 (xflags
& BX_BKGRDMARKER
) > (root
->b_xflags
& BX_BKGRDMARKER
))) {
1380 if ((y
= root
->b_right
) == NULL
)
1382 if (lblkno
> y
->b_lblkno
) {
1384 root
->b_right
= y
->b_left
;
1387 if ((y
= root
->b_right
) == NULL
)
1390 /* Link into the new root's left tree. */
1391 lefttreemax
->b_right
= root
;
1398 /* Assemble the new root. */
1399 lefttreemax
->b_right
= root
->b_left
;
1400 righttreemin
->b_left
= root
->b_right
;
1401 root
->b_left
= dummy
.b_right
;
1402 root
->b_right
= dummy
.b_left
;
1407 buf_vlist_remove(struct buf
*bp
)
1412 KASSERT(bp
->b_bufobj
!= NULL
, ("No b_bufobj %p", bp
));
1413 ASSERT_BO_LOCKED(bp
->b_bufobj
);
1414 KASSERT((bp
->b_xflags
& (BX_VNDIRTY
|BX_VNCLEAN
)) !=
1415 (BX_VNDIRTY
|BX_VNCLEAN
),
1416 ("buf_vlist_remove: Buf %p is on two lists", bp
));
1417 if (bp
->b_xflags
& BX_VNDIRTY
)
1418 bv
= &bp
->b_bufobj
->bo_dirty
;
1420 bv
= &bp
->b_bufobj
->bo_clean
;
1421 if (bp
!= bv
->bv_root
) {
1422 root
= buf_splay(bp
->b_lblkno
, bp
->b_xflags
, bv
->bv_root
);
1423 KASSERT(root
== bp
, ("splay lookup failed in remove"));
1425 if (bp
->b_left
== NULL
) {
1428 root
= buf_splay(bp
->b_lblkno
, bp
->b_xflags
, bp
->b_left
);
1429 root
->b_right
= bp
->b_right
;
1432 TAILQ_REMOVE(&bv
->bv_hd
, bp
, b_bobufs
);
1434 bp
->b_xflags
&= ~(BX_VNDIRTY
| BX_VNCLEAN
);
1438 * Add the buffer to the sorted clean or dirty block list using a
1439 * splay tree algorithm.
1441 * NOTE: xflags is passed as a constant, optimizing this inline function!
1444 buf_vlist_add(struct buf
*bp
, struct bufobj
*bo
, b_xflags_t xflags
)
1449 ASSERT_BO_LOCKED(bo
);
1450 KASSERT((bp
->b_xflags
& (BX_VNDIRTY
|BX_VNCLEAN
)) == 0,
1451 ("buf_vlist_add: Buf %p has existing xflags %d", bp
, bp
->b_xflags
));
1452 bp
->b_xflags
|= xflags
;
1453 if (xflags
& BX_VNDIRTY
)
1458 root
= buf_splay(bp
->b_lblkno
, bp
->b_xflags
, bv
->bv_root
);
1462 TAILQ_INSERT_TAIL(&bv
->bv_hd
, bp
, b_bobufs
);
1463 } else if (bp
->b_lblkno
< root
->b_lblkno
||
1464 (bp
->b_lblkno
== root
->b_lblkno
&&
1465 (bp
->b_xflags
& BX_BKGRDMARKER
) < (root
->b_xflags
& BX_BKGRDMARKER
))) {
1466 bp
->b_left
= root
->b_left
;
1468 root
->b_left
= NULL
;
1469 TAILQ_INSERT_BEFORE(root
, bp
, b_bobufs
);
1471 bp
->b_right
= root
->b_right
;
1473 root
->b_right
= NULL
;
1474 TAILQ_INSERT_AFTER(&bv
->bv_hd
, root
, bp
, b_bobufs
);
1481 * Lookup a buffer using the splay tree. Note that we specifically avoid
1482 * shadow buffers used in background bitmap writes.
1484 * This code isn't quite efficient as it could be because we are maintaining
1485 * two sorted lists and do not know which list the block resides in.
1487 * During a "make buildworld" the desired buffer is found at one of
1488 * the roots more than 60% of the time. Thus, checking both roots
1489 * before performing either splay eliminates unnecessary splays on the
1490 * first tree splayed.
1493 gbincore(struct bufobj
*bo
, daddr_t lblkno
)
1497 ASSERT_BO_LOCKED(bo
);
1498 if ((bp
= bo
->bo_clean
.bv_root
) != NULL
&&
1499 bp
->b_lblkno
== lblkno
&& !(bp
->b_xflags
& BX_BKGRDMARKER
))
1501 if ((bp
= bo
->bo_dirty
.bv_root
) != NULL
&&
1502 bp
->b_lblkno
== lblkno
&& !(bp
->b_xflags
& BX_BKGRDMARKER
))
1504 if ((bp
= bo
->bo_clean
.bv_root
) != NULL
) {
1505 bo
->bo_clean
.bv_root
= bp
= buf_splay(lblkno
, 0, bp
);
1506 if (bp
->b_lblkno
== lblkno
&& !(bp
->b_xflags
& BX_BKGRDMARKER
))
1509 if ((bp
= bo
->bo_dirty
.bv_root
) != NULL
) {
1510 bo
->bo_dirty
.bv_root
= bp
= buf_splay(lblkno
, 0, bp
);
1511 if (bp
->b_lblkno
== lblkno
&& !(bp
->b_xflags
& BX_BKGRDMARKER
))
1518 * Associate a buffer with a vnode.
1521 bgetvp(struct vnode
*vp
, struct buf
*bp
)
1526 ASSERT_BO_LOCKED(bo
);
1527 VNASSERT(bp
->b_vp
== NULL
, bp
->b_vp
, ("bgetvp: not free"));
1529 CTR3(KTR_BUF
, "bgetvp(%p) vp %p flags %X", bp
, vp
, bp
->b_flags
);
1530 VNASSERT((bp
->b_xflags
& (BX_VNDIRTY
|BX_VNCLEAN
)) == 0, vp
,
1531 ("bgetvp: bp already attached! %p", bp
));
1534 if (VFS_NEEDSGIANT(vp
->v_mount
) || bo
->bo_flag
& BO_NEEDSGIANT
)
1535 bp
->b_flags
|= B_NEEDSGIANT
;
1539 * Insert onto list for new vnode.
1541 buf_vlist_add(bp
, bo
, BX_VNCLEAN
);
1545 * Disassociate a buffer from a vnode.
1548 brelvp(struct buf
*bp
)
1553 CTR3(KTR_BUF
, "brelvp(%p) vp %p flags %X", bp
, bp
->b_vp
, bp
->b_flags
);
1554 KASSERT(bp
->b_vp
!= NULL
, ("brelvp: NULL"));
1557 * Delete from old vnode list, if on one.
1559 vp
= bp
->b_vp
; /* XXX */
1562 if (bp
->b_xflags
& (BX_VNDIRTY
| BX_VNCLEAN
))
1563 buf_vlist_remove(bp
);
1565 panic("brelvp: Buffer %p not on queue.", bp
);
1566 if ((bo
->bo_flag
& BO_ONWORKLST
) && bo
->bo_dirty
.bv_cnt
== 0) {
1567 bo
->bo_flag
&= ~BO_ONWORKLST
;
1568 mtx_lock(&sync_mtx
);
1569 LIST_REMOVE(bo
, bo_synclist
);
1570 syncer_worklist_len
--;
1571 mtx_unlock(&sync_mtx
);
1573 bp
->b_flags
&= ~B_NEEDSGIANT
;
1575 bp
->b_bufobj
= NULL
;
1581 * Add an item to the syncer work queue.
1584 vn_syncer_add_to_worklist(struct bufobj
*bo
, int delay
)
1588 ASSERT_BO_LOCKED(bo
);
1590 mtx_lock(&sync_mtx
);
1591 if (bo
->bo_flag
& BO_ONWORKLST
)
1592 LIST_REMOVE(bo
, bo_synclist
);
1594 bo
->bo_flag
|= BO_ONWORKLST
;
1595 syncer_worklist_len
++;
1598 if (delay
> syncer_maxdelay
- 2)
1599 delay
= syncer_maxdelay
- 2;
1600 slot
= (syncer_delayno
+ delay
) & syncer_mask
;
1602 queue
= VFS_NEEDSGIANT(bo
->__bo_vnode
->v_mount
) ? WI_GIANTQ
:
1604 LIST_INSERT_HEAD(&syncer_workitem_pending
[queue
][slot
], bo
,
1606 mtx_unlock(&sync_mtx
);
1610 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS
)
1614 mtx_lock(&sync_mtx
);
1615 len
= syncer_worklist_len
- sync_vnode_count
;
1616 mtx_unlock(&sync_mtx
);
1617 error
= SYSCTL_OUT(req
, &len
, sizeof(len
));
1621 SYSCTL_PROC(_vfs
, OID_AUTO
, worklist_len
, CTLTYPE_INT
| CTLFLAG_RD
, NULL
, 0,
1622 sysctl_vfs_worklist_len
, "I", "Syncer thread worklist length");
1624 static struct proc
*updateproc
;
1625 static void sched_sync(void);
1626 static struct kproc_desc up_kp
= {
1631 SYSINIT(syncer
, SI_SUB_KTHREAD_UPDATE
, SI_ORDER_FIRST
, kproc_start
, &up_kp
);
1634 sync_vnode(struct synclist
*slp
, struct bufobj
**bo
, struct thread
*td
)
1639 *bo
= LIST_FIRST(slp
);
1642 vp
= (*bo
)->__bo_vnode
; /* XXX */
1643 if (VOP_ISLOCKED(vp
) != 0 || VI_TRYLOCK(vp
) == 0)
1646 * We use vhold in case the vnode does not
1647 * successfully sync. vhold prevents the vnode from
1648 * going away when we unlock the sync_mtx so that
1649 * we can acquire the vnode interlock.
1652 mtx_unlock(&sync_mtx
);
1654 if (vn_start_write(vp
, &mp
, V_NOWAIT
) != 0) {
1656 mtx_lock(&sync_mtx
);
1657 return (*bo
== LIST_FIRST(slp
));
1659 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
);
1660 (void) VOP_FSYNC(vp
, MNT_LAZY
, td
);
1662 vn_finished_write(mp
);
1664 if (((*bo
)->bo_flag
& BO_ONWORKLST
) != 0) {
1666 * Put us back on the worklist. The worklist
1667 * routine will remove us from our current
1668 * position and then add us back in at a later
1671 vn_syncer_add_to_worklist(*bo
, syncdelay
);
1675 mtx_lock(&sync_mtx
);
1680 * System filesystem synchronizer daemon.
1685 struct synclist
*gnext
, *next
;
1686 struct synclist
*gslp
, *slp
;
1689 struct thread
*td
= curthread
;
1691 int net_worklist_len
;
1692 int syncer_final_iter
;
1697 syncer_final_iter
= 0;
1699 syncer_state
= SYNCER_RUNNING
;
1700 starttime
= time_uptime
;
1701 td
->td_pflags
|= TDP_NORUNNINGBUF
;
1703 EVENTHANDLER_REGISTER(shutdown_pre_sync
, syncer_shutdown
, td
->td_proc
,
1706 mtx_lock(&sync_mtx
);
1708 if (syncer_state
== SYNCER_FINAL_DELAY
&&
1709 syncer_final_iter
== 0) {
1710 mtx_unlock(&sync_mtx
);
1711 kproc_suspend_check(td
->td_proc
);
1712 mtx_lock(&sync_mtx
);
1714 net_worklist_len
= syncer_worklist_len
- sync_vnode_count
;
1715 if (syncer_state
!= SYNCER_RUNNING
&&
1716 starttime
!= time_uptime
) {
1718 printf("\nSyncing disks, vnodes remaining...");
1721 printf("%d ", net_worklist_len
);
1723 starttime
= time_uptime
;
1726 * Push files whose dirty time has expired. Be careful
1727 * of interrupt race on slp queue.
1729 * Skip over empty worklist slots when shutting down.
1732 slp
= &syncer_workitem_pending
[WI_MPSAFEQ
][syncer_delayno
];
1733 gslp
= &syncer_workitem_pending
[WI_GIANTQ
][syncer_delayno
];
1734 syncer_delayno
+= 1;
1735 if (syncer_delayno
== syncer_maxdelay
)
1737 next
= &syncer_workitem_pending
[WI_MPSAFEQ
][syncer_delayno
];
1738 gnext
= &syncer_workitem_pending
[WI_GIANTQ
][syncer_delayno
];
1740 * If the worklist has wrapped since the
1741 * it was emptied of all but syncer vnodes,
1742 * switch to the FINAL_DELAY state and run
1743 * for one more second.
1745 if (syncer_state
== SYNCER_SHUTTING_DOWN
&&
1746 net_worklist_len
== 0 &&
1747 last_work_seen
== syncer_delayno
) {
1748 syncer_state
= SYNCER_FINAL_DELAY
;
1749 syncer_final_iter
= SYNCER_SHUTDOWN_SPEEDUP
;
1751 } while (syncer_state
!= SYNCER_RUNNING
&& LIST_EMPTY(slp
) &&
1752 LIST_EMPTY(gslp
) && syncer_worklist_len
> 0);
1755 * Keep track of the last time there was anything
1756 * on the worklist other than syncer vnodes.
1757 * Return to the SHUTTING_DOWN state if any
1760 if (net_worklist_len
> 0 || syncer_state
== SYNCER_RUNNING
)
1761 last_work_seen
= syncer_delayno
;
1762 if (net_worklist_len
> 0 && syncer_state
== SYNCER_FINAL_DELAY
)
1763 syncer_state
= SYNCER_SHUTTING_DOWN
;
1764 while (!LIST_EMPTY(slp
)) {
1765 error
= sync_vnode(slp
, &bo
, td
);
1767 LIST_REMOVE(bo
, bo_synclist
);
1768 LIST_INSERT_HEAD(next
, bo
, bo_synclist
);
1772 if (!LIST_EMPTY(gslp
)) {
1773 mtx_unlock(&sync_mtx
);
1775 mtx_lock(&sync_mtx
);
1776 while (!LIST_EMPTY(gslp
)) {
1777 error
= sync_vnode(gslp
, &bo
, td
);
1779 LIST_REMOVE(bo
, bo_synclist
);
1780 LIST_INSERT_HEAD(gnext
, bo
,
1787 if (syncer_state
== SYNCER_FINAL_DELAY
&& syncer_final_iter
> 0)
1788 syncer_final_iter
--;
1790 * The variable rushjob allows the kernel to speed up the
1791 * processing of the filesystem syncer process. A rushjob
1792 * value of N tells the filesystem syncer to process the next
1793 * N seconds worth of work on its queue ASAP. Currently rushjob
1794 * is used by the soft update code to speed up the filesystem
1795 * syncer process when the incore state is getting so far
1796 * ahead of the disk that the kernel memory pool is being
1797 * threatened with exhaustion.
1804 * Just sleep for a short period of time between
1805 * iterations when shutting down to allow some I/O
1808 * If it has taken us less than a second to process the
1809 * current work, then wait. Otherwise start right over
1810 * again. We can still lose time if any single round
1811 * takes more than two seconds, but it does not really
1812 * matter as we are just trying to generally pace the
1813 * filesystem activity.
1815 if (syncer_state
!= SYNCER_RUNNING
)
1816 cv_timedwait(&sync_wakeup
, &sync_mtx
,
1817 hz
/ SYNCER_SHUTDOWN_SPEEDUP
);
1818 else if (time_uptime
== starttime
)
1819 cv_timedwait(&sync_wakeup
, &sync_mtx
, hz
);
1824 * Request the syncer daemon to speed up its work.
1825 * We never push it to speed up more than half of its
1826 * normal turn time, otherwise it could take over the cpu.
1829 speedup_syncer(void)
1833 mtx_lock(&sync_mtx
);
1834 if (rushjob
< syncdelay
/ 2) {
1836 stat_rush_requests
+= 1;
1839 mtx_unlock(&sync_mtx
);
1840 cv_broadcast(&sync_wakeup
);
1845 * Tell the syncer to speed up its work and run though its work
1846 * list several times, then tell it to shut down.
1849 syncer_shutdown(void *arg
, int howto
)
1852 if (howto
& RB_NOSYNC
)
1854 mtx_lock(&sync_mtx
);
1855 syncer_state
= SYNCER_SHUTTING_DOWN
;
1857 mtx_unlock(&sync_mtx
);
1858 cv_broadcast(&sync_wakeup
);
1859 kproc_shutdown(arg
, howto
);
1863 * Reassign a buffer from one vnode to another.
1864 * Used to assign file specific control information
1865 * (indirect blocks) to the vnode to which they belong.
1868 reassignbuf(struct buf
*bp
)
1881 CTR3(KTR_BUF
, "reassignbuf(%p) vp %p flags %X",
1882 bp
, bp
->b_vp
, bp
->b_flags
);
1884 * B_PAGING flagged buffers cannot be reassigned because their vp
1885 * is not fully linked in.
1887 if (bp
->b_flags
& B_PAGING
)
1888 panic("cannot reassign paging buffer");
1891 * Delete from old vnode list, if on one.
1894 if (bp
->b_xflags
& (BX_VNDIRTY
| BX_VNCLEAN
))
1895 buf_vlist_remove(bp
);
1897 panic("reassignbuf: Buffer %p not on queue.", bp
);
1899 * If dirty, put on list of dirty buffers; otherwise insert onto list
1902 if (bp
->b_flags
& B_DELWRI
) {
1903 if ((bo
->bo_flag
& BO_ONWORKLST
) == 0) {
1904 switch (vp
->v_type
) {
1914 vn_syncer_add_to_worklist(bo
, delay
);
1916 buf_vlist_add(bp
, bo
, BX_VNDIRTY
);
1918 buf_vlist_add(bp
, bo
, BX_VNCLEAN
);
1920 if ((bo
->bo_flag
& BO_ONWORKLST
) && bo
->bo_dirty
.bv_cnt
== 0) {
1921 mtx_lock(&sync_mtx
);
1922 LIST_REMOVE(bo
, bo_synclist
);
1923 syncer_worklist_len
--;
1924 mtx_unlock(&sync_mtx
);
1925 bo
->bo_flag
&= ~BO_ONWORKLST
;
1930 bp
= TAILQ_FIRST(&bv
->bv_hd
);
1931 KASSERT(bp
== NULL
|| bp
->b_bufobj
== bo
,
1932 ("bp %p wrong b_bufobj %p should be %p", bp
, bp
->b_bufobj
, bo
));
1933 bp
= TAILQ_LAST(&bv
->bv_hd
, buflists
);
1934 KASSERT(bp
== NULL
|| bp
->b_bufobj
== bo
,
1935 ("bp %p wrong b_bufobj %p should be %p", bp
, bp
->b_bufobj
, bo
));
1937 bp
= TAILQ_FIRST(&bv
->bv_hd
);
1938 KASSERT(bp
== NULL
|| bp
->b_bufobj
== bo
,
1939 ("bp %p wrong b_bufobj %p should be %p", bp
, bp
->b_bufobj
, bo
));
1940 bp
= TAILQ_LAST(&bv
->bv_hd
, buflists
);
1941 KASSERT(bp
== NULL
|| bp
->b_bufobj
== bo
,
1942 ("bp %p wrong b_bufobj %p should be %p", bp
, bp
->b_bufobj
, bo
));
1948 * Increment the use and hold counts on the vnode, taking care to reference
1949 * the driver's usecount if this is a chardev. The vholdl() will remove
1950 * the vnode from the free list if it is presently free. Requires the
1951 * vnode interlock and returns with it held.
1954 v_incr_usecount(struct vnode
*vp
)
1957 CTR3(KTR_VFS
, "v_incr_usecount: vp %p holdcnt %d usecount %d\n",
1958 vp
, vp
->v_holdcnt
, vp
->v_usecount
);
1960 if (vp
->v_type
== VCHR
&& vp
->v_rdev
!= NULL
) {
1962 vp
->v_rdev
->si_usecount
++;
1969 * Turn a holdcnt into a use+holdcnt such that only one call to
1970 * v_decr_usecount is needed.
1973 v_upgrade_usecount(struct vnode
*vp
)
1976 CTR3(KTR_VFS
, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n",
1977 vp
, vp
->v_holdcnt
, vp
->v_usecount
);
1979 if (vp
->v_type
== VCHR
&& vp
->v_rdev
!= NULL
) {
1981 vp
->v_rdev
->si_usecount
++;
1987 * Decrement the vnode use and hold count along with the driver's usecount
1988 * if this is a chardev. The vdropl() below releases the vnode interlock
1989 * as it may free the vnode.
1992 v_decr_usecount(struct vnode
*vp
)
1995 CTR3(KTR_VFS
, "v_decr_usecount: vp %p holdcnt %d usecount %d\n",
1996 vp
, vp
->v_holdcnt
, vp
->v_usecount
);
1997 ASSERT_VI_LOCKED(vp
, __FUNCTION__
);
1998 VNASSERT(vp
->v_usecount
> 0, vp
,
1999 ("v_decr_usecount: negative usecount"));
2001 if (vp
->v_type
== VCHR
&& vp
->v_rdev
!= NULL
) {
2003 vp
->v_rdev
->si_usecount
--;
2010 * Decrement only the use count and driver use count. This is intended to
2011 * be paired with a follow on vdropl() to release the remaining hold count.
2012 * In this way we may vgone() a vnode with a 0 usecount without risk of
2013 * having it end up on a free list because the hold count is kept above 0.
2016 v_decr_useonly(struct vnode
*vp
)
2019 CTR3(KTR_VFS
, "v_decr_useonly: vp %p holdcnt %d usecount %d\n",
2020 vp
, vp
->v_holdcnt
, vp
->v_usecount
);
2021 ASSERT_VI_LOCKED(vp
, __FUNCTION__
);
2022 VNASSERT(vp
->v_usecount
> 0, vp
,
2023 ("v_decr_useonly: negative usecount"));
2025 if (vp
->v_type
== VCHR
&& vp
->v_rdev
!= NULL
) {
2027 vp
->v_rdev
->si_usecount
--;
2033 * Grab a particular vnode from the free list, increment its
2034 * reference count and lock it. VI_DOOMED is set if the vnode
2035 * is being destroyed. Only callers who specify LK_RETRY will
2036 * see doomed vnodes. If inactive processing was delayed in
2037 * vput try to do it here.
2040 vget(struct vnode
*vp
, int flags
, struct thread
*td
)
2045 VFS_ASSERT_GIANT(vp
->v_mount
);
2046 VNASSERT((flags
& LK_TYPE_MASK
) != 0, vp
,
2047 ("vget: invalid lock operation"));
2048 if ((flags
& LK_INTERLOCK
) == 0)
2051 if ((error
= vn_lock(vp
, flags
| LK_INTERLOCK
)) != 0) {
2055 if (vp
->v_iflag
& VI_DOOMED
&& (flags
& LK_RETRY
) == 0)
2056 panic("vget: vn_lock failed to return ENOENT\n");
2058 /* Upgrade our holdcnt to a usecount. */
2059 v_upgrade_usecount(vp
);
2061 * We don't guarantee that any particular close will
2062 * trigger inactive processing so just make a best effort
2063 * here at preventing a reference to a removed file. If
2064 * we don't succeed no harm is done.
2066 if (vp
->v_iflag
& VI_OWEINACT
) {
2067 if (VOP_ISLOCKED(vp
) == LK_EXCLUSIVE
&&
2068 (flags
& LK_NOWAIT
) == 0)
2070 vp
->v_iflag
&= ~VI_OWEINACT
;
2077 * Increase the reference count of a vnode.
2080 vref(struct vnode
*vp
)
2084 v_incr_usecount(vp
);
2089 * Return reference count of a vnode.
2091 * The results of this call are only guaranteed when some mechanism other
2092 * than the VI lock is used to stop other processes from gaining references
2093 * to the vnode. This may be the case if the caller holds the only reference.
2094 * This is also useful when stale data is acceptable as race conditions may
2095 * be accounted for by some other means.
2098 vrefcnt(struct vnode
*vp
)
2103 usecnt
= vp
->v_usecount
;
2111 * Vnode put/release.
2112 * If count drops to zero, call inactive routine and return to freelist.
2115 vrele(struct vnode
*vp
)
2117 struct thread
*td
= curthread
; /* XXX */
2119 KASSERT(vp
!= NULL
, ("vrele: null vp"));
2120 VFS_ASSERT_GIANT(vp
->v_mount
);
2124 /* Skip this v_writecount check if we're going to panic below. */
2125 VNASSERT(vp
->v_writecount
< vp
->v_usecount
|| vp
->v_usecount
< 1, vp
,
2126 ("vrele: missed vn_close"));
2128 if (vp
->v_usecount
> 1 || ((vp
->v_iflag
& VI_DOINGINACT
) &&
2129 vp
->v_usecount
== 1)) {
2130 v_decr_usecount(vp
);
2133 if (vp
->v_usecount
!= 1) {
2135 vprint("vrele: negative ref count", vp
);
2138 panic("vrele: negative ref cnt");
2141 * We want to hold the vnode until the inactive finishes to
2142 * prevent vgone() races. We drop the use count here and the
2143 * hold count below when we're done.
2147 * We must call VOP_INACTIVE with the node locked. Mark
2148 * as VI_DOINGINACT to avoid recursion.
2150 vp
->v_iflag
|= VI_OWEINACT
;
2151 if (vn_lock(vp
, LK_EXCLUSIVE
| LK_INTERLOCK
) == 0) {
2153 if (vp
->v_usecount
> 0)
2154 vp
->v_iflag
&= ~VI_OWEINACT
;
2155 if (vp
->v_iflag
& VI_OWEINACT
)
2160 if (vp
->v_usecount
> 0)
2161 vp
->v_iflag
&= ~VI_OWEINACT
;
2167 * Release an already locked vnode. This give the same effects as
2168 * unlock+vrele(), but takes less time and avoids releasing and
2169 * re-aquiring the lock (as vrele() acquires the lock internally.)
2172 vput(struct vnode
*vp
)
2174 struct thread
*td
= curthread
; /* XXX */
2177 KASSERT(vp
!= NULL
, ("vput: null vp"));
2178 ASSERT_VOP_LOCKED(vp
, "vput");
2179 VFS_ASSERT_GIANT(vp
->v_mount
);
2181 /* Skip this v_writecount check if we're going to panic below. */
2182 VNASSERT(vp
->v_writecount
< vp
->v_usecount
|| vp
->v_usecount
< 1, vp
,
2183 ("vput: missed vn_close"));
2186 if (vp
->v_usecount
> 1 || ((vp
->v_iflag
& VI_DOINGINACT
) &&
2187 vp
->v_usecount
== 1)) {
2189 v_decr_usecount(vp
);
2193 if (vp
->v_usecount
!= 1) {
2195 vprint("vput: negative ref count", vp
);
2197 panic("vput: negative ref cnt");
2200 * We want to hold the vnode until the inactive finishes to
2201 * prevent vgone() races. We drop the use count here and the
2202 * hold count below when we're done.
2205 vp
->v_iflag
|= VI_OWEINACT
;
2206 if (VOP_ISLOCKED(vp
) != LK_EXCLUSIVE
) {
2207 error
= VOP_LOCK(vp
, LK_UPGRADE
|LK_INTERLOCK
|LK_NOWAIT
);
2210 if (vp
->v_usecount
> 0)
2211 vp
->v_iflag
&= ~VI_OWEINACT
;
2215 if (vp
->v_usecount
> 0)
2216 vp
->v_iflag
&= ~VI_OWEINACT
;
2217 if (vp
->v_iflag
& VI_OWEINACT
)
2225 * Somebody doesn't want the vnode recycled.
2228 vhold(struct vnode
*vp
)
2237 vholdl(struct vnode
*vp
)
2241 if (VSHOULDBUSY(vp
))
2246 * Note that there is one less who cares about this vnode. vdrop() is the
2247 * opposite of vhold().
2250 vdrop(struct vnode
*vp
)
2258 * Drop the hold count of the vnode. If this is the last reference to
2259 * the vnode we will free it if it has been vgone'd otherwise it is
2260 * placed on the free list.
2263 vdropl(struct vnode
*vp
)
2266 ASSERT_VI_LOCKED(vp
, "vdropl");
2267 if (vp
->v_holdcnt
<= 0)
2268 panic("vdrop: holdcnt %d", vp
->v_holdcnt
);
2270 if (vp
->v_holdcnt
== 0) {
2271 if (vp
->v_iflag
& VI_DOOMED
) {
2281 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2282 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
2283 * OWEINACT tracks whether a vnode missed a call to inactive due to a
2284 * failed lock upgrade.
2287 vinactive(struct vnode
*vp
, struct thread
*td
)
2290 ASSERT_VOP_ELOCKED(vp
, "vinactive");
2291 ASSERT_VI_LOCKED(vp
, "vinactive");
2292 VNASSERT((vp
->v_iflag
& VI_DOINGINACT
) == 0, vp
,
2293 ("vinactive: recursed on VI_DOINGINACT"));
2294 vp
->v_iflag
|= VI_DOINGINACT
;
2295 vp
->v_iflag
&= ~VI_OWEINACT
;
2297 VOP_INACTIVE(vp
, td
);
2299 VNASSERT(vp
->v_iflag
& VI_DOINGINACT
, vp
,
2300 ("vinactive: lost VI_DOINGINACT"));
2301 vp
->v_iflag
&= ~VI_DOINGINACT
;
2305 * Remove any vnodes in the vnode table belonging to mount point mp.
2307 * If FORCECLOSE is not specified, there should not be any active ones,
2308 * return error if any are found (nb: this is a user error, not a
2309 * system error). If FORCECLOSE is specified, detach any active vnodes
2312 * If WRITECLOSE is set, only flush out regular file vnodes open for
2315 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2317 * `rootrefs' specifies the base reference count for the root vnode
2318 * of this filesystem. The root vnode is considered busy if its
2319 * v_usecount exceeds this value. On a successful return, vflush(, td)
2320 * will call vrele() on the root vnode exactly rootrefs times.
2321 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2325 static int busyprt
= 0; /* print out busy vnodes */
2326 SYSCTL_INT(_debug
, OID_AUTO
, busyprt
, CTLFLAG_RW
, &busyprt
, 0, "");
2330 vflush( struct mount
*mp
, int rootrefs
, int flags
, struct thread
*td
)
2332 struct vnode
*vp
, *mvp
, *rootvp
= NULL
;
2334 int busy
= 0, error
;
2336 CTR1(KTR_VFS
, "vflush: mp %p", mp
);
2338 KASSERT((flags
& (SKIPSYSTEM
| WRITECLOSE
)) == 0,
2339 ("vflush: bad args"));
2341 * Get the filesystem root vnode. We can vput() it
2342 * immediately, since with rootrefs > 0, it won't go away.
2344 if ((error
= VFS_ROOT(mp
, LK_EXCLUSIVE
, &rootvp
, td
)) != 0)
2351 MNT_VNODE_FOREACH(vp
, mp
, mvp
) {
2356 error
= vn_lock(vp
, LK_INTERLOCK
| LK_EXCLUSIVE
);
2360 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp
, mvp
);
2364 * Skip over a vnodes marked VV_SYSTEM.
2366 if ((flags
& SKIPSYSTEM
) && (vp
->v_vflag
& VV_SYSTEM
)) {
2373 * If WRITECLOSE is set, flush out unlinked but still open
2374 * files (even if open only for reading) and regular file
2375 * vnodes open for writing.
2377 if (flags
& WRITECLOSE
) {
2378 error
= VOP_GETATTR(vp
, &vattr
, td
->td_ucred
);
2381 if ((vp
->v_type
== VNON
||
2382 (error
== 0 && vattr
.va_nlink
> 0)) &&
2383 (vp
->v_writecount
== 0 || vp
->v_type
!= VREG
)) {
2392 * With v_usecount == 0, all we need to do is clear out the
2393 * vnode data structures and we are done.
2395 * If FORCECLOSE is set, forcibly close the vnode.
2397 if (vp
->v_usecount
== 0 || (flags
& FORCECLOSE
)) {
2398 VNASSERT(vp
->v_usecount
== 0 ||
2399 (vp
->v_type
!= VCHR
&& vp
->v_type
!= VBLK
), vp
,
2400 ("device VNODE %p is FORCECLOSED", vp
));
2406 vprint("vflush: busy vnode", vp
);
2414 if (rootrefs
> 0 && (flags
& FORCECLOSE
) == 0) {
2416 * If just the root vnode is busy, and if its refcount
2417 * is equal to `rootrefs', then go ahead and kill it.
2420 KASSERT(busy
> 0, ("vflush: not busy"));
2421 VNASSERT(rootvp
->v_usecount
>= rootrefs
, rootvp
,
2422 ("vflush: usecount %d < rootrefs %d",
2423 rootvp
->v_usecount
, rootrefs
));
2424 if (busy
== 1 && rootvp
->v_usecount
== rootrefs
) {
2425 VOP_LOCK(rootvp
, LK_EXCLUSIVE
|LK_INTERLOCK
);
2427 VOP_UNLOCK(rootvp
, 0);
2434 for (; rootrefs
> 0; rootrefs
--)
2440 * Recycle an unused vnode to the front of the free list.
2443 vrecycle(struct vnode
*vp
, struct thread
*td
)
2447 ASSERT_VOP_ELOCKED(vp
, "vrecycle");
2450 if (vp
->v_usecount
== 0) {
2459 * Eliminate all activity associated with a vnode
2460 * in preparation for reuse.
2463 vgone(struct vnode
*vp
)
2471 * vgone, with the vp interlock held.
2474 vgonel(struct vnode
*vp
)
2481 CTR1(KTR_VFS
, "vgonel: vp %p", vp
);
2482 ASSERT_VOP_ELOCKED(vp
, "vgonel");
2483 ASSERT_VI_LOCKED(vp
, "vgonel");
2484 VNASSERT(vp
->v_holdcnt
, vp
,
2485 ("vgonel: vp %p has no reference.", vp
));
2489 * Don't vgonel if we're already doomed.
2491 if (vp
->v_iflag
& VI_DOOMED
)
2493 vp
->v_iflag
|= VI_DOOMED
;
2495 * Check to see if the vnode is in use. If so, we have to call
2496 * VOP_CLOSE() and VOP_INACTIVE().
2498 active
= vp
->v_usecount
;
2499 oweinact
= (vp
->v_iflag
& VI_OWEINACT
);
2502 * Clean out any buffers associated with the vnode.
2503 * If the flush fails, just toss the buffers.
2506 if (!TAILQ_EMPTY(&vp
->v_bufobj
.bo_dirty
.bv_hd
))
2507 (void) vn_start_secondary_write(vp
, &mp
, V_WAIT
);
2508 if (vinvalbuf(vp
, V_SAVE
, td
, 0, 0) != 0)
2509 vinvalbuf(vp
, 0, td
, 0, 0);
2512 * If purging an active vnode, it must be closed and
2513 * deactivated before being reclaimed.
2516 VOP_CLOSE(vp
, FNONBLOCK
, NOCRED
, td
);
2517 if (oweinact
|| active
) {
2519 if ((vp
->v_iflag
& VI_DOINGINACT
) == 0)
2524 * Reclaim the vnode.
2526 if (VOP_RECLAIM(vp
, td
))
2527 panic("vgone: cannot reclaim");
2529 vn_finished_secondary_write(mp
);
2530 VNASSERT(vp
->v_object
== NULL
, vp
,
2531 ("vop_reclaim left v_object vp=%p, tag=%s", vp
, vp
->v_tag
));
2533 * Clear the advisory locks and wake up waiting threads.
2535 lf_purgelocks(vp
, &(vp
->v_lockf
));
2537 * Delete from old mount point vnode list.
2542 * Done with purge, reset to the standard lock and invalidate
2546 vp
->v_vnlock
= &vp
->v_lock
;
2547 vp
->v_op
= &dead_vnodeops
;
2553 * Calculate the total number of references to a special device.
2556 vcount(struct vnode
*vp
)
2561 count
= vp
->v_rdev
->si_usecount
;
2567 * Same as above, but using the struct cdev *as argument
2570 count_dev(struct cdev
*dev
)
2575 count
= dev
->si_usecount
;
2581 * Print out a description of a vnode.
2583 static char *typename
[] =
2584 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2588 vn_printf(struct vnode
*vp
, const char *fmt
, ...)
2591 char buf
[256], buf2
[16];
2597 printf("%p: ", (void *)vp
);
2598 printf("tag %s, type %s\n", vp
->v_tag
, typename
[vp
->v_type
]);
2599 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n",
2600 vp
->v_usecount
, vp
->v_writecount
, vp
->v_holdcnt
, vp
->v_mountedhere
);
2603 if (vp
->v_vflag
& VV_ROOT
)
2604 strlcat(buf
, "|VV_ROOT", sizeof(buf
));
2605 if (vp
->v_vflag
& VV_ISTTY
)
2606 strlcat(buf
, "|VV_ISTTY", sizeof(buf
));
2607 if (vp
->v_vflag
& VV_NOSYNC
)
2608 strlcat(buf
, "|VV_NOSYNC", sizeof(buf
));
2609 if (vp
->v_vflag
& VV_CACHEDLABEL
)
2610 strlcat(buf
, "|VV_CACHEDLABEL", sizeof(buf
));
2611 if (vp
->v_vflag
& VV_TEXT
)
2612 strlcat(buf
, "|VV_TEXT", sizeof(buf
));
2613 if (vp
->v_vflag
& VV_COPYONWRITE
)
2614 strlcat(buf
, "|VV_COPYONWRITE", sizeof(buf
));
2615 if (vp
->v_vflag
& VV_SYSTEM
)
2616 strlcat(buf
, "|VV_SYSTEM", sizeof(buf
));
2617 if (vp
->v_vflag
& VV_PROCDEP
)
2618 strlcat(buf
, "|VV_PROCDEP", sizeof(buf
));
2619 if (vp
->v_vflag
& VV_NOKNOTE
)
2620 strlcat(buf
, "|VV_NOKNOTE", sizeof(buf
));
2621 if (vp
->v_vflag
& VV_DELETED
)
2622 strlcat(buf
, "|VV_DELETED", sizeof(buf
));
2623 if (vp
->v_vflag
& VV_MD
)
2624 strlcat(buf
, "|VV_MD", sizeof(buf
));
2625 flags
= vp
->v_vflag
& ~(VV_ROOT
| VV_ISTTY
| VV_NOSYNC
|
2626 VV_CACHEDLABEL
| VV_TEXT
| VV_COPYONWRITE
| VV_SYSTEM
| VV_PROCDEP
|
2627 VV_NOKNOTE
| VV_DELETED
| VV_MD
);
2629 snprintf(buf2
, sizeof(buf2
), "|VV(0x%lx)", flags
);
2630 strlcat(buf
, buf2
, sizeof(buf
));
2632 if (vp
->v_iflag
& VI_MOUNT
)
2633 strlcat(buf
, "|VI_MOUNT", sizeof(buf
));
2634 if (vp
->v_iflag
& VI_AGE
)
2635 strlcat(buf
, "|VI_AGE", sizeof(buf
));
2636 if (vp
->v_iflag
& VI_DOOMED
)
2637 strlcat(buf
, "|VI_DOOMED", sizeof(buf
));
2638 if (vp
->v_iflag
& VI_FREE
)
2639 strlcat(buf
, "|VI_FREE", sizeof(buf
));
2640 if (vp
->v_iflag
& VI_OBJDIRTY
)
2641 strlcat(buf
, "|VI_OBJDIRTY", sizeof(buf
));
2642 if (vp
->v_iflag
& VI_DOINGINACT
)
2643 strlcat(buf
, "|VI_DOINGINACT", sizeof(buf
));
2644 if (vp
->v_iflag
& VI_OWEINACT
)
2645 strlcat(buf
, "|VI_OWEINACT", sizeof(buf
));
2646 flags
= vp
->v_iflag
& ~(VI_MOUNT
| VI_AGE
| VI_DOOMED
| VI_FREE
|
2647 VI_OBJDIRTY
| VI_DOINGINACT
| VI_OWEINACT
);
2649 snprintf(buf2
, sizeof(buf2
), "|VI(0x%lx)", flags
);
2650 strlcat(buf
, buf2
, sizeof(buf
));
2652 printf(" flags (%s)\n", buf
+ 1);
2653 if (mtx_owned(VI_MTX(vp
)))
2654 printf(" VI_LOCKed");
2655 if (vp
->v_object
!= NULL
)
2656 printf(" v_object %p ref %d pages %d\n",
2657 vp
->v_object
, vp
->v_object
->ref_count
,
2658 vp
->v_object
->resident_page_count
);
2660 lockmgr_printinfo(vp
->v_vnlock
);
2662 if (vp
->v_data
!= NULL
)
2668 * List all of the locked vnodes in the system.
2669 * Called when debugging the kernel.
2671 DB_SHOW_COMMAND(lockedvnods
, lockedvnodes
)
2673 struct mount
*mp
, *nmp
;
2677 * Note: because this is DDB, we can't obey the locking semantics
2678 * for these structures, which means we could catch an inconsistent
2679 * state and dereference a nasty pointer. Not much to be done
2682 db_printf("Locked vnodes\n");
2683 for (mp
= TAILQ_FIRST(&mountlist
); mp
!= NULL
; mp
= nmp
) {
2684 nmp
= TAILQ_NEXT(mp
, mnt_list
);
2685 TAILQ_FOREACH(vp
, &mp
->mnt_nvnodelist
, v_nmntvnodes
) {
2686 if (vp
->v_type
!= VMARKER
&&
2690 nmp
= TAILQ_NEXT(mp
, mnt_list
);
2695 * Show details about the given vnode.
2697 DB_SHOW_COMMAND(vnode
, db_show_vnode
)
2703 vp
= (struct vnode
*)addr
;
2704 vn_printf(vp
, "vnode ");
2708 * Show details about the given mount point.
2710 DB_SHOW_COMMAND(mount
, db_show_mount
)
2719 /* No address given, print short info about all mount points. */
2720 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
2721 db_printf("%p %s on %s (%s)\n", mp
,
2722 mp
->mnt_stat
.f_mntfromname
,
2723 mp
->mnt_stat
.f_mntonname
,
2724 mp
->mnt_stat
.f_fstypename
);
2728 db_printf("\nMore info: show mount <addr>\n");
2732 mp
= (struct mount
*)addr
;
2733 db_printf("%p %s on %s (%s)\n", mp
, mp
->mnt_stat
.f_mntfromname
,
2734 mp
->mnt_stat
.f_mntonname
, mp
->mnt_stat
.f_fstypename
);
2737 flags
= mp
->mnt_flag
;
2738 #define MNT_FLAG(flag) do { \
2739 if (flags & (flag)) { \
2740 if (buf[0] != '\0') \
2741 strlcat(buf, ", ", sizeof(buf)); \
2742 strlcat(buf, (#flag) + 4, sizeof(buf)); \
2746 MNT_FLAG(MNT_RDONLY
);
2747 MNT_FLAG(MNT_SYNCHRONOUS
);
2748 MNT_FLAG(MNT_NOEXEC
);
2749 MNT_FLAG(MNT_NOSUID
);
2750 MNT_FLAG(MNT_UNION
);
2751 MNT_FLAG(MNT_ASYNC
);
2752 MNT_FLAG(MNT_SUIDDIR
);
2753 MNT_FLAG(MNT_SOFTDEP
);
2754 MNT_FLAG(MNT_NOSYMFOLLOW
);
2755 MNT_FLAG(MNT_GJOURNAL
);
2756 MNT_FLAG(MNT_MULTILABEL
);
2758 MNT_FLAG(MNT_NOATIME
);
2759 MNT_FLAG(MNT_NOCLUSTERR
);
2760 MNT_FLAG(MNT_NOCLUSTERW
);
2761 MNT_FLAG(MNT_EXRDONLY
);
2762 MNT_FLAG(MNT_EXPORTED
);
2763 MNT_FLAG(MNT_DEFEXPORTED
);
2764 MNT_FLAG(MNT_EXPORTANON
);
2765 MNT_FLAG(MNT_EXKERB
);
2766 MNT_FLAG(MNT_EXPUBLIC
);
2767 MNT_FLAG(MNT_LOCAL
);
2768 MNT_FLAG(MNT_QUOTA
);
2769 MNT_FLAG(MNT_ROOTFS
);
2771 MNT_FLAG(MNT_IGNORE
);
2772 MNT_FLAG(MNT_UPDATE
);
2773 MNT_FLAG(MNT_DELEXPORT
);
2774 MNT_FLAG(MNT_RELOAD
);
2775 MNT_FLAG(MNT_FORCE
);
2776 MNT_FLAG(MNT_SNAPSHOT
);
2777 MNT_FLAG(MNT_BYFSID
);
2781 strlcat(buf
, ", ", sizeof(buf
));
2782 snprintf(buf
+ strlen(buf
), sizeof(buf
) - strlen(buf
),
2785 db_printf(" mnt_flag = %s\n", buf
);
2788 flags
= mp
->mnt_kern_flag
;
2789 #define MNT_KERN_FLAG(flag) do { \
2790 if (flags & (flag)) { \
2791 if (buf[0] != '\0') \
2792 strlcat(buf, ", ", sizeof(buf)); \
2793 strlcat(buf, (#flag) + 5, sizeof(buf)); \
2797 MNT_KERN_FLAG(MNTK_UNMOUNTF
);
2798 MNT_KERN_FLAG(MNTK_ASYNC
);
2799 MNT_KERN_FLAG(MNTK_SOFTDEP
);
2800 MNT_KERN_FLAG(MNTK_NOINSMNTQ
);
2801 MNT_KERN_FLAG(MNTK_UNMOUNT
);
2802 MNT_KERN_FLAG(MNTK_MWAIT
);
2803 MNT_KERN_FLAG(MNTK_SUSPEND
);
2804 MNT_KERN_FLAG(MNTK_SUSPEND2
);
2805 MNT_KERN_FLAG(MNTK_SUSPENDED
);
2806 MNT_KERN_FLAG(MNTK_MPSAFE
);
2807 MNT_KERN_FLAG(MNTK_NOKNOTE
);
2808 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED
);
2809 #undef MNT_KERN_FLAG
2812 strlcat(buf
, ", ", sizeof(buf
));
2813 snprintf(buf
+ strlen(buf
), sizeof(buf
) - strlen(buf
),
2816 db_printf(" mnt_kern_flag = %s\n", buf
);
2819 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx "
2820 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
2821 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
2822 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
2823 (u_int
)sp
->f_version
, (u_int
)sp
->f_type
, (uintmax_t)sp
->f_flags
,
2824 (uintmax_t)sp
->f_bsize
, (uintmax_t)sp
->f_iosize
,
2825 (uintmax_t)sp
->f_blocks
, (uintmax_t)sp
->f_bfree
,
2826 (intmax_t)sp
->f_bavail
, (uintmax_t)sp
->f_files
,
2827 (intmax_t)sp
->f_ffree
, (uintmax_t)sp
->f_syncwrites
,
2828 (uintmax_t)sp
->f_asyncwrites
, (uintmax_t)sp
->f_syncreads
,
2829 (uintmax_t)sp
->f_asyncreads
, (u_int
)sp
->f_namemax
,
2830 (u_int
)sp
->f_owner
, (int)sp
->f_fsid
.val
[0], (int)sp
->f_fsid
.val
[1]);
2832 db_printf(" mnt_cred = { uid=%u ruid=%u",
2833 (u_int
)mp
->mnt_cred
->cr_uid
, (u_int
)mp
->mnt_cred
->cr_ruid
);
2834 if (mp
->mnt_cred
->cr_prison
!= NULL
)
2835 db_printf(", jail=%d", mp
->mnt_cred
->cr_prison
->pr_id
);
2837 db_printf(" mnt_ref = %d\n", mp
->mnt_ref
);
2838 db_printf(" mnt_gen = %d\n", mp
->mnt_gen
);
2839 db_printf(" mnt_nvnodelistsize = %d\n", mp
->mnt_nvnodelistsize
);
2840 db_printf(" mnt_writeopcount = %d\n", mp
->mnt_writeopcount
);
2841 db_printf(" mnt_noasync = %u\n", mp
->mnt_noasync
);
2842 db_printf(" mnt_maxsymlinklen = %d\n", mp
->mnt_maxsymlinklen
);
2843 db_printf(" mnt_iosize_max = %d\n", mp
->mnt_iosize_max
);
2844 db_printf(" mnt_hashseed = %u\n", mp
->mnt_hashseed
);
2845 db_printf(" mnt_markercnt = %d\n", mp
->mnt_markercnt
);
2846 db_printf(" mnt_holdcnt = %d\n", mp
->mnt_holdcnt
);
2847 db_printf(" mnt_holdcntwaiters = %d\n", mp
->mnt_holdcntwaiters
);
2848 db_printf(" mnt_secondary_writes = %d\n", mp
->mnt_secondary_writes
);
2849 db_printf(" mnt_secondary_accwrites = %d\n",
2850 mp
->mnt_secondary_accwrites
);
2851 db_printf(" mnt_gjprovider = %s\n",
2852 mp
->mnt_gjprovider
!= NULL
? mp
->mnt_gjprovider
: "NULL");
2855 TAILQ_FOREACH(vp
, &mp
->mnt_nvnodelist
, v_nmntvnodes
) {
2856 if (vp
->v_type
!= VMARKER
) {
2857 vn_printf(vp
, "vnode ");
2866 * Fill in a struct xvfsconf based on a struct vfsconf.
2869 vfsconf2x(struct vfsconf
*vfsp
, struct xvfsconf
*xvfsp
)
2872 strcpy(xvfsp
->vfc_name
, vfsp
->vfc_name
);
2873 xvfsp
->vfc_typenum
= vfsp
->vfc_typenum
;
2874 xvfsp
->vfc_refcount
= vfsp
->vfc_refcount
;
2875 xvfsp
->vfc_flags
= vfsp
->vfc_flags
;
2877 * These are unused in userland, we keep them
2878 * to not break binary compatibility.
2880 xvfsp
->vfc_vfsops
= NULL
;
2881 xvfsp
->vfc_next
= NULL
;
2885 * Top level filesystem related information gathering.
2888 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS
)
2890 struct vfsconf
*vfsp
;
2891 struct xvfsconf xvfsp
;
2895 TAILQ_FOREACH(vfsp
, &vfsconf
, vfc_list
) {
2896 bzero(&xvfsp
, sizeof(xvfsp
));
2897 vfsconf2x(vfsp
, &xvfsp
);
2898 error
= SYSCTL_OUT(req
, &xvfsp
, sizeof xvfsp
);
2905 SYSCTL_PROC(_vfs
, OID_AUTO
, conflist
, CTLFLAG_RD
, NULL
, 0, sysctl_vfs_conflist
,
2906 "S,xvfsconf", "List of all configured filesystems");
2908 #ifndef BURN_BRIDGES
2909 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS
);
2912 vfs_sysctl(SYSCTL_HANDLER_ARGS
)
2914 int *name
= (int *)arg1
- 1; /* XXX */
2915 u_int namelen
= arg2
+ 1; /* XXX */
2916 struct vfsconf
*vfsp
;
2917 struct xvfsconf xvfsp
;
2919 printf("WARNING: userland calling deprecated sysctl, "
2920 "please rebuild world\n");
2922 #if 1 || defined(COMPAT_PRELITE2)
2923 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2925 return (sysctl_ovfs_conf(oidp
, arg1
, arg2
, req
));
2929 case VFS_MAXTYPENUM
:
2932 return (SYSCTL_OUT(req
, &maxvfsconf
, sizeof(int)));
2935 return (ENOTDIR
); /* overloaded */
2936 TAILQ_FOREACH(vfsp
, &vfsconf
, vfc_list
)
2937 if (vfsp
->vfc_typenum
== name
[2])
2940 return (EOPNOTSUPP
);
2941 bzero(&xvfsp
, sizeof(xvfsp
));
2942 vfsconf2x(vfsp
, &xvfsp
);
2943 return (SYSCTL_OUT(req
, &xvfsp
, sizeof(xvfsp
)));
2945 return (EOPNOTSUPP
);
2948 static SYSCTL_NODE(_vfs
, VFS_GENERIC
, generic
, CTLFLAG_RD
| CTLFLAG_SKIP
,
2949 vfs_sysctl
, "Generic filesystem");
2951 #if 1 || defined(COMPAT_PRELITE2)
2954 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS
)
2957 struct vfsconf
*vfsp
;
2958 struct ovfsconf ovfs
;
2960 TAILQ_FOREACH(vfsp
, &vfsconf
, vfc_list
) {
2961 bzero(&ovfs
, sizeof(ovfs
));
2962 ovfs
.vfc_vfsops
= vfsp
->vfc_vfsops
; /* XXX used as flag */
2963 strcpy(ovfs
.vfc_name
, vfsp
->vfc_name
);
2964 ovfs
.vfc_index
= vfsp
->vfc_typenum
;
2965 ovfs
.vfc_refcount
= vfsp
->vfc_refcount
;
2966 ovfs
.vfc_flags
= vfsp
->vfc_flags
;
2967 error
= SYSCTL_OUT(req
, &ovfs
, sizeof ovfs
);
2974 #endif /* 1 || COMPAT_PRELITE2 */
2975 #endif /* !BURN_BRIDGES */
2977 #define KINFO_VNODESLOP 10
2980 * Dump vnode list (via sysctl).
2984 sysctl_vnode(SYSCTL_HANDLER_ARGS
)
2992 * Stale numvnodes access is not fatal here.
2995 len
= (numvnodes
+ KINFO_VNODESLOP
) * sizeof *xvn
;
2997 /* Make an estimate */
2998 return (SYSCTL_OUT(req
, 0, len
));
3000 error
= sysctl_wire_old_buffer(req
, 0);
3003 xvn
= malloc(len
, M_TEMP
, M_ZERO
| M_WAITOK
);
3005 mtx_lock(&mountlist_mtx
);
3006 TAILQ_FOREACH(mp
, &mountlist
, mnt_list
) {
3007 if (vfs_busy(mp
, LK_NOWAIT
, &mountlist_mtx
))
3010 TAILQ_FOREACH(vp
, &mp
->mnt_nvnodelist
, v_nmntvnodes
) {
3014 xvn
[n
].xv_size
= sizeof *xvn
;
3015 xvn
[n
].xv_vnode
= vp
;
3016 xvn
[n
].xv_id
= 0; /* XXX compat */
3017 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3019 XV_COPY(writecount
);
3025 xvn
[n
].xv_flag
= vp
->v_vflag
;
3027 switch (vp
->v_type
) {
3034 if (vp
->v_rdev
== NULL
) {
3038 xvn
[n
].xv_dev
= dev2udev(vp
->v_rdev
);
3041 xvn
[n
].xv_socket
= vp
->v_socket
;
3044 xvn
[n
].xv_fifo
= vp
->v_fifoinfo
;
3049 /* shouldn't happen? */
3057 mtx_lock(&mountlist_mtx
);
3062 mtx_unlock(&mountlist_mtx
);
3064 error
= SYSCTL_OUT(req
, xvn
, n
* sizeof *xvn
);
3069 SYSCTL_PROC(_kern
, KERN_VNODE
, vnode
, CTLTYPE_OPAQUE
|CTLFLAG_RD
,
3070 0, 0, sysctl_vnode
, "S,xvnode", "");
3074 * Unmount all filesystems. The list is traversed in reverse order
3075 * of mounting to avoid dependencies.
3078 vfs_unmountall(void)
3084 KASSERT(curthread
!= NULL
, ("vfs_unmountall: NULL curthread"));
3087 * Since this only runs when rebooting, it is not interlocked.
3089 while(!TAILQ_EMPTY(&mountlist
)) {
3090 mp
= TAILQ_LAST(&mountlist
, mntlist
);
3091 error
= dounmount(mp
, MNT_FORCE
, td
);
3093 TAILQ_REMOVE(&mountlist
, mp
, mnt_list
);
3095 * XXX: Due to the way in which we mount the root
3096 * file system off of devfs, devfs will generate a
3097 * "busy" warning when we try to unmount it before
3098 * the root. Don't print a warning as a result in
3099 * order to avoid false positive errors that may
3100 * cause needless upset.
3102 if (strcmp(mp
->mnt_vfc
->vfc_name
, "devfs") != 0) {
3103 printf("unmount of %s failed (",
3104 mp
->mnt_stat
.f_mntonname
);
3108 printf("%d)\n", error
);
3111 /* The unmount has removed mp from the mountlist */
3117 * perform msync on all vnodes under a mount point
3118 * the mount point must be locked.
3121 vfs_msync(struct mount
*mp
, int flags
)
3123 struct vnode
*vp
, *mvp
;
3124 struct vm_object
*obj
;
3127 MNT_VNODE_FOREACH(vp
, mp
, mvp
) {
3129 if ((vp
->v_iflag
& VI_OBJDIRTY
) &&
3130 (flags
== MNT_WAIT
|| VOP_ISLOCKED(vp
) == 0)) {
3133 LK_EXCLUSIVE
| LK_RETRY
| LK_INTERLOCK
,
3135 if (vp
->v_vflag
& VV_NOSYNC
) { /* unlinked */
3143 VM_OBJECT_LOCK(obj
);
3144 vm_object_page_clean(obj
, 0, 0,
3146 OBJPC_SYNC
: OBJPC_NOSYNC
);
3147 VM_OBJECT_UNLOCK(obj
);
3159 * Mark a vnode as free, putting it up for recycling.
3162 vfree(struct vnode
*vp
)
3165 CTR1(KTR_VFS
, "vfree vp %p", vp
);
3166 ASSERT_VI_LOCKED(vp
, "vfree");
3167 mtx_lock(&vnode_free_list_mtx
);
3168 VNASSERT(vp
->v_op
!= NULL
, vp
, ("vfree: vnode already reclaimed."));
3169 VNASSERT((vp
->v_iflag
& VI_FREE
) == 0, vp
, ("vnode already free"));
3170 VNASSERT(VSHOULDFREE(vp
), vp
, ("vfree: freeing when we shouldn't"));
3171 VNASSERT((vp
->v_iflag
& VI_DOOMED
) == 0, vp
,
3172 ("vfree: Freeing doomed vnode"));
3173 if (vp
->v_iflag
& VI_AGE
) {
3174 TAILQ_INSERT_HEAD(&vnode_free_list
, vp
, v_freelist
);
3176 TAILQ_INSERT_TAIL(&vnode_free_list
, vp
, v_freelist
);
3179 vp
->v_iflag
&= ~VI_AGE
;
3180 vp
->v_iflag
|= VI_FREE
;
3181 mtx_unlock(&vnode_free_list_mtx
);
3185 * Opposite of vfree() - mark a vnode as in use.
3188 vbusy(struct vnode
*vp
)
3190 CTR1(KTR_VFS
, "vbusy vp %p", vp
);
3191 ASSERT_VI_LOCKED(vp
, "vbusy");
3192 VNASSERT((vp
->v_iflag
& VI_FREE
) != 0, vp
, ("vnode not free"));
3193 VNASSERT(vp
->v_op
!= NULL
, vp
, ("vbusy: vnode already reclaimed."));
3195 mtx_lock(&vnode_free_list_mtx
);
3196 TAILQ_REMOVE(&vnode_free_list
, vp
, v_freelist
);
3198 vp
->v_iflag
&= ~(VI_FREE
|VI_AGE
);
3199 mtx_unlock(&vnode_free_list_mtx
);
3203 * Initalize per-vnode helper structure to hold poll-related state.
3206 v_addpollinfo(struct vnode
*vp
)
3208 struct vpollinfo
*vi
;
3210 vi
= uma_zalloc(vnodepoll_zone
, M_WAITOK
);
3211 if (vp
->v_pollinfo
!= NULL
) {
3212 uma_zfree(vnodepoll_zone
, vi
);
3215 vp
->v_pollinfo
= vi
;
3216 mtx_init(&vp
->v_pollinfo
->vpi_lock
, "vnode pollinfo", NULL
, MTX_DEF
);
3217 knlist_init(&vp
->v_pollinfo
->vpi_selinfo
.si_note
, vp
, vfs_knllock
,
3218 vfs_knlunlock
, vfs_knllocked
);
3222 * Record a process's interest in events which might happen to
3223 * a vnode. Because poll uses the historic select-style interface
3224 * internally, this routine serves as both the ``check for any
3225 * pending events'' and the ``record my interest in future events''
3226 * functions. (These are done together, while the lock is held,
3227 * to avoid race conditions.)
3230 vn_pollrecord(struct vnode
*vp
, struct thread
*td
, int events
)
3233 if (vp
->v_pollinfo
== NULL
)
3235 mtx_lock(&vp
->v_pollinfo
->vpi_lock
);
3236 if (vp
->v_pollinfo
->vpi_revents
& events
) {
3238 * This leaves events we are not interested
3239 * in available for the other process which
3240 * which presumably had requested them
3241 * (otherwise they would never have been
3244 events
&= vp
->v_pollinfo
->vpi_revents
;
3245 vp
->v_pollinfo
->vpi_revents
&= ~events
;
3247 mtx_unlock(&vp
->v_pollinfo
->vpi_lock
);
3250 vp
->v_pollinfo
->vpi_events
|= events
;
3251 selrecord(td
, &vp
->v_pollinfo
->vpi_selinfo
);
3252 mtx_unlock(&vp
->v_pollinfo
->vpi_lock
);
3257 * Routine to create and manage a filesystem syncer vnode.
3259 #define sync_close ((int (*)(struct vop_close_args *))nullop)
3260 static int sync_fsync(struct vop_fsync_args
*);
3261 static int sync_inactive(struct vop_inactive_args
*);
3262 static int sync_reclaim(struct vop_reclaim_args
*);
3264 static struct vop_vector sync_vnodeops
= {
3265 .vop_bypass
= VOP_EOPNOTSUPP
,
3266 .vop_close
= sync_close
, /* close */
3267 .vop_fsync
= sync_fsync
, /* fsync */
3268 .vop_inactive
= sync_inactive
, /* inactive */
3269 .vop_reclaim
= sync_reclaim
, /* reclaim */
3270 .vop_lock1
= vop_stdlock
, /* lock */
3271 .vop_unlock
= vop_stdunlock
, /* unlock */
3272 .vop_islocked
= vop_stdislocked
, /* islocked */
3276 * Create a new filesystem syncer vnode for the specified mount point.
3279 vfs_allocate_syncvnode(struct mount
*mp
)
3283 static long start
, incr
, next
;
3286 /* Allocate a new vnode */
3287 if ((error
= getnewvnode("syncer", mp
, &sync_vnodeops
, &vp
)) != 0) {
3288 mp
->mnt_syncer
= NULL
;
3292 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
);
3293 vp
->v_vflag
|= VV_FORCEINSMQ
;
3294 error
= insmntque(vp
, mp
);
3296 panic("vfs_allocate_syncvnode: insmntque failed");
3297 vp
->v_vflag
&= ~VV_FORCEINSMQ
;
3300 * Place the vnode onto the syncer worklist. We attempt to
3301 * scatter them about on the list so that they will go off
3302 * at evenly distributed times even if all the filesystems
3303 * are mounted at once.
3306 if (next
== 0 || next
> syncer_maxdelay
) {
3310 start
= syncer_maxdelay
/ 2;
3311 incr
= syncer_maxdelay
;
3317 vn_syncer_add_to_worklist(bo
, syncdelay
> 0 ? next
% syncdelay
: 0);
3318 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3319 mtx_lock(&sync_mtx
);
3321 mtx_unlock(&sync_mtx
);
3323 mp
->mnt_syncer
= vp
;
3328 * Do a lazy sync of the filesystem.
3331 sync_fsync(struct vop_fsync_args
*ap
)
3333 struct vnode
*syncvp
= ap
->a_vp
;
3334 struct mount
*mp
= syncvp
->v_mount
;
3339 * We only need to do something if this is a lazy evaluation.
3341 if (ap
->a_waitfor
!= MNT_LAZY
)
3345 * Move ourselves to the back of the sync list.
3347 bo
= &syncvp
->v_bufobj
;
3349 vn_syncer_add_to_worklist(bo
, syncdelay
);
3353 * Walk the list of vnodes pushing all that are dirty and
3354 * not already on the sync list.
3356 mtx_lock(&mountlist_mtx
);
3357 if (vfs_busy(mp
, LK_EXCLUSIVE
| LK_NOWAIT
, &mountlist_mtx
) != 0) {
3358 mtx_unlock(&mountlist_mtx
);
3361 if (vn_start_write(NULL
, &mp
, V_NOWAIT
) != 0) {
3367 mp
->mnt_kern_flag
&= ~MNTK_ASYNC
;
3369 vfs_msync(mp
, MNT_NOWAIT
);
3370 error
= VFS_SYNC(mp
, MNT_LAZY
, ap
->a_td
);
3373 if ((mp
->mnt_flag
& MNT_ASYNC
) != 0 && mp
->mnt_noasync
== 0)
3374 mp
->mnt_kern_flag
|= MNTK_ASYNC
;
3376 vn_finished_write(mp
);
3382 * The syncer vnode is no referenced.
3385 sync_inactive(struct vop_inactive_args
*ap
)
3393 * The syncer vnode is no longer needed and is being decommissioned.
3395 * Modifications to the worklist must be protected by sync_mtx.
3398 sync_reclaim(struct vop_reclaim_args
*ap
)
3400 struct vnode
*vp
= ap
->a_vp
;
3405 vp
->v_mount
->mnt_syncer
= NULL
;
3406 if (bo
->bo_flag
& BO_ONWORKLST
) {
3407 mtx_lock(&sync_mtx
);
3408 LIST_REMOVE(bo
, bo_synclist
);
3409 syncer_worklist_len
--;
3411 mtx_unlock(&sync_mtx
);
3412 bo
->bo_flag
&= ~BO_ONWORKLST
;
3420 * Check if vnode represents a disk device
3423 vn_isdisk(struct vnode
*vp
, int *errp
)
3429 if (vp
->v_type
!= VCHR
)
3431 else if (vp
->v_rdev
== NULL
)
3433 else if (vp
->v_rdev
->si_devsw
== NULL
)
3435 else if (!(vp
->v_rdev
->si_devsw
->d_flags
& D_DISK
))
3440 return (error
== 0);
3444 * Common filesystem object access control check routine. Accepts a
3445 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3446 * and optional call-by-reference privused argument allowing vaccess()
3447 * to indicate to the caller whether privilege was used to satisfy the
3448 * request (obsoleted). Returns 0 on success, or an errno on failure.
3450 * The ifdef'd CAPABILITIES version is here for reference, but is not
3454 vaccess(enum vtype type
, mode_t file_mode
, uid_t file_uid
, gid_t file_gid
,
3455 mode_t acc_mode
, struct ucred
*cred
, int *privused
)
3458 mode_t priv_granted
;
3461 * Look for a normal, non-privileged way to access the file/directory
3462 * as requested. If it exists, go with that.
3465 if (privused
!= NULL
)
3470 /* Check the owner. */
3471 if (cred
->cr_uid
== file_uid
) {
3472 dac_granted
|= VADMIN
;
3473 if (file_mode
& S_IXUSR
)
3474 dac_granted
|= VEXEC
;
3475 if (file_mode
& S_IRUSR
)
3476 dac_granted
|= VREAD
;
3477 if (file_mode
& S_IWUSR
)
3478 dac_granted
|= (VWRITE
| VAPPEND
);
3480 if ((acc_mode
& dac_granted
) == acc_mode
)
3486 /* Otherwise, check the groups (first match) */
3487 if (groupmember(file_gid
, cred
)) {
3488 if (file_mode
& S_IXGRP
)
3489 dac_granted
|= VEXEC
;
3490 if (file_mode
& S_IRGRP
)
3491 dac_granted
|= VREAD
;
3492 if (file_mode
& S_IWGRP
)
3493 dac_granted
|= (VWRITE
| VAPPEND
);
3495 if ((acc_mode
& dac_granted
) == acc_mode
)
3501 /* Otherwise, check everyone else. */
3502 if (file_mode
& S_IXOTH
)
3503 dac_granted
|= VEXEC
;
3504 if (file_mode
& S_IROTH
)
3505 dac_granted
|= VREAD
;
3506 if (file_mode
& S_IWOTH
)
3507 dac_granted
|= (VWRITE
| VAPPEND
);
3508 if ((acc_mode
& dac_granted
) == acc_mode
)
3513 * Build a privilege mask to determine if the set of privileges
3514 * satisfies the requirements when combined with the granted mask
3515 * from above. For each privilege, if the privilege is required,
3516 * bitwise or the request type onto the priv_granted mask.
3522 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3523 * requests, instead of PRIV_VFS_EXEC.
3525 if ((acc_mode
& VEXEC
) && ((dac_granted
& VEXEC
) == 0) &&
3526 !priv_check_cred(cred
, PRIV_VFS_LOOKUP
, 0))
3527 priv_granted
|= VEXEC
;
3529 if ((acc_mode
& VEXEC
) && ((dac_granted
& VEXEC
) == 0) &&
3530 !priv_check_cred(cred
, PRIV_VFS_EXEC
, 0))
3531 priv_granted
|= VEXEC
;
3534 if ((acc_mode
& VREAD
) && ((dac_granted
& VREAD
) == 0) &&
3535 !priv_check_cred(cred
, PRIV_VFS_READ
, 0))
3536 priv_granted
|= VREAD
;
3538 if ((acc_mode
& VWRITE
) && ((dac_granted
& VWRITE
) == 0) &&
3539 !priv_check_cred(cred
, PRIV_VFS_WRITE
, 0))
3540 priv_granted
|= (VWRITE
| VAPPEND
);
3542 if ((acc_mode
& VADMIN
) && ((dac_granted
& VADMIN
) == 0) &&
3543 !priv_check_cred(cred
, PRIV_VFS_ADMIN
, 0))
3544 priv_granted
|= VADMIN
;
3546 if ((acc_mode
& (priv_granted
| dac_granted
)) == acc_mode
) {
3547 /* XXX audit: privilege used */
3548 if (privused
!= NULL
)
3553 return ((acc_mode
& VADMIN
) ? EPERM
: EACCES
);
3557 * Credential check based on process requesting service, and per-attribute
3561 extattr_check_cred(struct vnode
*vp
, int attrnamespace
, struct ucred
*cred
,
3562 struct thread
*td
, int access
)
3566 * Kernel-invoked always succeeds.
3572 * Do not allow privileged processes in jail to directly manipulate
3573 * system attributes.
3575 switch (attrnamespace
) {
3576 case EXTATTR_NAMESPACE_SYSTEM
:
3577 /* Potentially should be: return (EPERM); */
3578 return (priv_check_cred(cred
, PRIV_VFS_EXTATTR_SYSTEM
, 0));
3579 case EXTATTR_NAMESPACE_USER
:
3580 return (VOP_ACCESS(vp
, access
, cred
, td
));
3586 #ifdef DEBUG_VFS_LOCKS
3588 * This only exists to supress warnings from unlocked specfs accesses. It is
3589 * no longer ok to have an unlocked VFS.
3591 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
3592 (vp)->v_type == VCHR || (vp)->v_type == VBAD)
3594 int vfs_badlock_ddb
= 1; /* Drop into debugger on violation. */
3595 SYSCTL_INT(_debug
, OID_AUTO
, vfs_badlock_ddb
, CTLFLAG_RW
, &vfs_badlock_ddb
, 0, "");
3597 int vfs_badlock_mutex
= 1; /* Check for interlock across VOPs. */
3598 SYSCTL_INT(_debug
, OID_AUTO
, vfs_badlock_mutex
, CTLFLAG_RW
, &vfs_badlock_mutex
, 0, "");
3600 int vfs_badlock_print
= 1; /* Print lock violations. */
3601 SYSCTL_INT(_debug
, OID_AUTO
, vfs_badlock_print
, CTLFLAG_RW
, &vfs_badlock_print
, 0, "");
3604 int vfs_badlock_backtrace
= 1; /* Print backtrace at lock violations. */
3605 SYSCTL_INT(_debug
, OID_AUTO
, vfs_badlock_backtrace
, CTLFLAG_RW
, &vfs_badlock_backtrace
, 0, "");
3609 vfs_badlock(const char *msg
, const char *str
, struct vnode
*vp
)
3613 if (vfs_badlock_backtrace
)
3616 if (vfs_badlock_print
)
3617 printf("%s: %p %s\n", str
, (void *)vp
, msg
);
3618 if (vfs_badlock_ddb
)
3619 kdb_enter(KDB_WHY_VFSLOCK
, "lock violation");
3623 assert_vi_locked(struct vnode
*vp
, const char *str
)
3626 if (vfs_badlock_mutex
&& !mtx_owned(VI_MTX(vp
)))
3627 vfs_badlock("interlock is not locked but should be", str
, vp
);
3631 assert_vi_unlocked(struct vnode
*vp
, const char *str
)
3634 if (vfs_badlock_mutex
&& mtx_owned(VI_MTX(vp
)))
3635 vfs_badlock("interlock is locked but should not be", str
, vp
);
3639 assert_vop_locked(struct vnode
*vp
, const char *str
)
3642 if (!IGNORE_LOCK(vp
) && VOP_ISLOCKED(vp
) == 0)
3643 vfs_badlock("is not locked but should be", str
, vp
);
3647 assert_vop_unlocked(struct vnode
*vp
, const char *str
)
3650 if (!IGNORE_LOCK(vp
) && VOP_ISLOCKED(vp
) == LK_EXCLUSIVE
)
3651 vfs_badlock("is locked but should not be", str
, vp
);
3655 assert_vop_elocked(struct vnode
*vp
, const char *str
)
3658 if (!IGNORE_LOCK(vp
) && VOP_ISLOCKED(vp
) != LK_EXCLUSIVE
)
3659 vfs_badlock("is not exclusive locked but should be", str
, vp
);
3664 assert_vop_elocked_other(struct vnode
*vp
, const char *str
)
3667 if (!IGNORE_LOCK(vp
) && VOP_ISLOCKED(vp
) != LK_EXCLOTHER
)
3668 vfs_badlock("is not exclusive locked by another thread",
3673 assert_vop_slocked(struct vnode
*vp
, const char *str
)
3676 if (!IGNORE_LOCK(vp
) && VOP_ISLOCKED(vp
) != LK_SHARED
)
3677 vfs_badlock("is not locked shared but should be", str
, vp
);
3680 #endif /* DEBUG_VFS_LOCKS */
3683 vop_rename_pre(void *ap
)
3685 struct vop_rename_args
*a
= ap
;
3687 #ifdef DEBUG_VFS_LOCKS
3689 ASSERT_VI_UNLOCKED(a
->a_tvp
, "VOP_RENAME");
3690 ASSERT_VI_UNLOCKED(a
->a_tdvp
, "VOP_RENAME");
3691 ASSERT_VI_UNLOCKED(a
->a_fvp
, "VOP_RENAME");
3692 ASSERT_VI_UNLOCKED(a
->a_fdvp
, "VOP_RENAME");
3694 /* Check the source (from). */
3695 if (a
->a_tdvp
!= a
->a_fdvp
&& a
->a_tvp
!= a
->a_fdvp
)
3696 ASSERT_VOP_UNLOCKED(a
->a_fdvp
, "vop_rename: fdvp locked");
3697 if (a
->a_tvp
!= a
->a_fvp
)
3698 ASSERT_VOP_UNLOCKED(a
->a_fvp
, "vop_rename: fvp locked");
3700 /* Check the target. */
3702 ASSERT_VOP_LOCKED(a
->a_tvp
, "vop_rename: tvp not locked");
3703 ASSERT_VOP_LOCKED(a
->a_tdvp
, "vop_rename: tdvp not locked");
3705 if (a
->a_tdvp
!= a
->a_fdvp
)
3707 if (a
->a_tvp
!= a
->a_fvp
)
3715 vop_strategy_pre(void *ap
)
3717 #ifdef DEBUG_VFS_LOCKS
3718 struct vop_strategy_args
*a
;
3725 * Cluster ops lock their component buffers but not the IO container.
3727 if ((bp
->b_flags
& B_CLUSTER
) != 0)
3730 if (!BUF_ISLOCKED(bp
)) {
3731 if (vfs_badlock_print
)
3733 "VOP_STRATEGY: bp is not locked but should be\n");
3734 if (vfs_badlock_ddb
)
3735 kdb_enter(KDB_WHY_VFSLOCK
, "lock violation");
3741 vop_lookup_pre(void *ap
)
3743 #ifdef DEBUG_VFS_LOCKS
3744 struct vop_lookup_args
*a
;
3749 ASSERT_VI_UNLOCKED(dvp
, "VOP_LOOKUP");
3750 ASSERT_VOP_LOCKED(dvp
, "VOP_LOOKUP");
3755 vop_lookup_post(void *ap
, int rc
)
3757 #ifdef DEBUG_VFS_LOCKS
3758 struct vop_lookup_args
*a
;
3766 ASSERT_VI_UNLOCKED(dvp
, "VOP_LOOKUP");
3767 ASSERT_VOP_LOCKED(dvp
, "VOP_LOOKUP");
3770 ASSERT_VOP_LOCKED(vp
, "VOP_LOOKUP (child)");
3775 vop_lock_pre(void *ap
)
3777 #ifdef DEBUG_VFS_LOCKS
3778 struct vop_lock1_args
*a
= ap
;
3780 if ((a
->a_flags
& LK_INTERLOCK
) == 0)
3781 ASSERT_VI_UNLOCKED(a
->a_vp
, "VOP_LOCK");
3783 ASSERT_VI_LOCKED(a
->a_vp
, "VOP_LOCK");
3788 vop_lock_post(void *ap
, int rc
)
3790 #ifdef DEBUG_VFS_LOCKS
3791 struct vop_lock1_args
*a
= ap
;
3793 ASSERT_VI_UNLOCKED(a
->a_vp
, "VOP_LOCK");
3795 ASSERT_VOP_LOCKED(a
->a_vp
, "VOP_LOCK");
3800 vop_unlock_pre(void *ap
)
3802 #ifdef DEBUG_VFS_LOCKS
3803 struct vop_unlock_args
*a
= ap
;
3805 if (a
->a_flags
& LK_INTERLOCK
)
3806 ASSERT_VI_LOCKED(a
->a_vp
, "VOP_UNLOCK");
3807 ASSERT_VOP_LOCKED(a
->a_vp
, "VOP_UNLOCK");
3812 vop_unlock_post(void *ap
, int rc
)
3814 #ifdef DEBUG_VFS_LOCKS
3815 struct vop_unlock_args
*a
= ap
;
3817 if (a
->a_flags
& LK_INTERLOCK
)
3818 ASSERT_VI_UNLOCKED(a
->a_vp
, "VOP_UNLOCK");
3823 vop_create_post(void *ap
, int rc
)
3825 struct vop_create_args
*a
= ap
;
3828 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
);
3832 vop_link_post(void *ap
, int rc
)
3834 struct vop_link_args
*a
= ap
;
3837 VFS_KNOTE_LOCKED(a
->a_vp
, NOTE_LINK
);
3838 VFS_KNOTE_LOCKED(a
->a_tdvp
, NOTE_WRITE
);
3843 vop_mkdir_post(void *ap
, int rc
)
3845 struct vop_mkdir_args
*a
= ap
;
3848 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
| NOTE_LINK
);
3852 vop_mknod_post(void *ap
, int rc
)
3854 struct vop_mknod_args
*a
= ap
;
3857 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
);
3861 vop_remove_post(void *ap
, int rc
)
3863 struct vop_remove_args
*a
= ap
;
3866 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
);
3867 VFS_KNOTE_LOCKED(a
->a_vp
, NOTE_DELETE
);
3872 vop_rename_post(void *ap
, int rc
)
3874 struct vop_rename_args
*a
= ap
;
3877 VFS_KNOTE_UNLOCKED(a
->a_fdvp
, NOTE_WRITE
);
3878 VFS_KNOTE_UNLOCKED(a
->a_tdvp
, NOTE_WRITE
);
3879 VFS_KNOTE_UNLOCKED(a
->a_fvp
, NOTE_RENAME
);
3881 VFS_KNOTE_UNLOCKED(a
->a_tvp
, NOTE_DELETE
);
3883 if (a
->a_tdvp
!= a
->a_fdvp
)
3885 if (a
->a_tvp
!= a
->a_fvp
)
3893 vop_rmdir_post(void *ap
, int rc
)
3895 struct vop_rmdir_args
*a
= ap
;
3898 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
| NOTE_LINK
);
3899 VFS_KNOTE_LOCKED(a
->a_vp
, NOTE_DELETE
);
3904 vop_setattr_post(void *ap
, int rc
)
3906 struct vop_setattr_args
*a
= ap
;
3909 VFS_KNOTE_LOCKED(a
->a_vp
, NOTE_ATTRIB
);
3913 vop_symlink_post(void *ap
, int rc
)
3915 struct vop_symlink_args
*a
= ap
;
3918 VFS_KNOTE_LOCKED(a
->a_dvp
, NOTE_WRITE
);
3921 static struct knlist fs_knlist
;
3924 vfs_event_init(void *arg
)
3926 knlist_init(&fs_knlist
, NULL
, NULL
, NULL
, NULL
);
3928 /* XXX - correct order? */
3929 SYSINIT(vfs_knlist
, SI_SUB_VFS
, SI_ORDER_ANY
, vfs_event_init
, NULL
);
3932 vfs_event_signal(fsid_t
*fsid
, u_int32_t event
, intptr_t data __unused
)
3935 KNOTE_UNLOCKED(&fs_knlist
, event
);
3938 static int filt_fsattach(struct knote
*kn
);
3939 static void filt_fsdetach(struct knote
*kn
);
3940 static int filt_fsevent(struct knote
*kn
, long hint
);
3942 struct filterops fs_filtops
=
3943 { 0, filt_fsattach
, filt_fsdetach
, filt_fsevent
};
3946 filt_fsattach(struct knote
*kn
)
3949 kn
->kn_flags
|= EV_CLEAR
;
3950 knlist_add(&fs_knlist
, kn
, 0);
3955 filt_fsdetach(struct knote
*kn
)
3958 knlist_remove(&fs_knlist
, kn
, 0);
3962 filt_fsevent(struct knote
*kn
, long hint
)
3965 kn
->kn_fflags
|= hint
;
3966 return (kn
->kn_fflags
!= 0);
3970 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS
)
3976 error
= SYSCTL_IN(req
, &vc
, sizeof(vc
));
3979 if (vc
.vc_vers
!= VFS_CTL_VERS1
)
3981 mp
= vfs_getvfs(&vc
.vc_fsid
);
3984 /* ensure that a specific sysctl goes to the right filesystem. */
3985 if (strcmp(vc
.vc_fstypename
, "*") != 0 &&
3986 strcmp(vc
.vc_fstypename
, mp
->mnt_vfc
->vfc_name
) != 0) {
3990 VCTLTOREQ(&vc
, req
);
3991 error
= VFS_SYSCTL(mp
, vc
.vc_op
, req
);
3996 SYSCTL_PROC(_vfs
, OID_AUTO
, ctl
, CTLFLAG_WR
, NULL
, 0, sysctl_vfs_ctl
, "",
4000 * Function to initialize a va_filerev field sensibly.
4001 * XXX: Wouldn't a random number make a lot more sense ??
4004 init_va_filerev(void)
4009 return (((u_quad_t
)bt
.sec
<< 32LL) | (bt
.frac
>> 32LL));
4012 static int filt_vfsread(struct knote
*kn
, long hint
);
4013 static int filt_vfswrite(struct knote
*kn
, long hint
);
4014 static int filt_vfsvnode(struct knote
*kn
, long hint
);
4015 static void filt_vfsdetach(struct knote
*kn
);
4016 static struct filterops vfsread_filtops
=
4017 { 1, NULL
, filt_vfsdetach
, filt_vfsread
};
4018 static struct filterops vfswrite_filtops
=
4019 { 1, NULL
, filt_vfsdetach
, filt_vfswrite
};
4020 static struct filterops vfsvnode_filtops
=
4021 { 1, NULL
, filt_vfsdetach
, filt_vfsvnode
};
4024 vfs_knllock(void *arg
)
4026 struct vnode
*vp
= arg
;
4028 vn_lock(vp
, LK_EXCLUSIVE
| LK_RETRY
);
4032 vfs_knlunlock(void *arg
)
4034 struct vnode
*vp
= arg
;
4040 vfs_knllocked(void *arg
)
4042 struct vnode
*vp
= arg
;
4044 return (VOP_ISLOCKED(vp
) == LK_EXCLUSIVE
);
4048 vfs_kqfilter(struct vop_kqfilter_args
*ap
)
4050 struct vnode
*vp
= ap
->a_vp
;
4051 struct knote
*kn
= ap
->a_kn
;
4054 switch (kn
->kn_filter
) {
4056 kn
->kn_fop
= &vfsread_filtops
;
4059 kn
->kn_fop
= &vfswrite_filtops
;
4062 kn
->kn_fop
= &vfsvnode_filtops
;
4068 kn
->kn_hook
= (caddr_t
)vp
;
4070 if (vp
->v_pollinfo
== NULL
)
4072 if (vp
->v_pollinfo
== NULL
)
4074 knl
= &vp
->v_pollinfo
->vpi_selinfo
.si_note
;
4075 knlist_add(knl
, kn
, 0);
4081 * Detach knote from vnode
4084 filt_vfsdetach(struct knote
*kn
)
4086 struct vnode
*vp
= (struct vnode
*)kn
->kn_hook
;
4088 KASSERT(vp
->v_pollinfo
!= NULL
, ("Missing v_pollinfo"));
4089 knlist_remove(&vp
->v_pollinfo
->vpi_selinfo
.si_note
, kn
, 0);
4094 filt_vfsread(struct knote
*kn
, long hint
)
4096 struct vnode
*vp
= (struct vnode
*)kn
->kn_hook
;
4100 * filesystem is gone, so set the EOF flag and schedule
4101 * the knote for deletion.
4103 if (hint
== NOTE_REVOKE
) {
4104 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
4108 if (VOP_GETATTR(vp
, &va
, curthread
->td_ucred
))
4111 kn
->kn_data
= va
.va_size
- kn
->kn_fp
->f_offset
;
4112 return (kn
->kn_data
!= 0);
4117 filt_vfswrite(struct knote
*kn
, long hint
)
4120 * filesystem is gone, so set the EOF flag and schedule
4121 * the knote for deletion.
4123 if (hint
== NOTE_REVOKE
)
4124 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
4131 filt_vfsvnode(struct knote
*kn
, long hint
)
4133 if (kn
->kn_sfflags
& hint
)
4134 kn
->kn_fflags
|= hint
;
4135 if (hint
== NOTE_REVOKE
) {
4136 kn
->kn_flags
|= EV_EOF
;
4139 return (kn
->kn_fflags
!= 0);
4143 vfs_read_dirent(struct vop_readdir_args
*ap
, struct dirent
*dp
, off_t off
)
4147 if (dp
->d_reclen
> ap
->a_uio
->uio_resid
)
4148 return (ENAMETOOLONG
);
4149 error
= uiomove(dp
, dp
->d_reclen
, ap
->a_uio
);
4151 if (ap
->a_ncookies
!= NULL
) {
4152 if (ap
->a_cookies
!= NULL
)
4153 free(ap
->a_cookies
, M_TEMP
);
4154 ap
->a_cookies
= NULL
;
4155 *ap
->a_ncookies
= 0;
4159 if (ap
->a_ncookies
== NULL
)
4162 KASSERT(ap
->a_cookies
,
4163 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
4165 *ap
->a_cookies
= realloc(*ap
->a_cookies
,
4166 (*ap
->a_ncookies
+ 1) * sizeof(u_long
), M_TEMP
, M_WAITOK
| M_ZERO
);
4167 (*ap
->a_cookies
)[*ap
->a_ncookies
] = off
;
4172 * Mark for update the access time of the file if the filesystem
4173 * supports VA_MARK_ATIME. This functionality is used by execve
4174 * and mmap, so we want to avoid the synchronous I/O implied by
4175 * directly setting va_atime for the sake of efficiency.
4178 vfs_mark_atime(struct vnode
*vp
, struct ucred
*cred
)
4180 struct vattr atimeattr
;
4182 if ((vp
->v_mount
->mnt_flag
& (MNT_NOATIME
| MNT_RDONLY
)) == 0) {
4183 VATTR_NULL(&atimeattr
);
4184 atimeattr
.va_vaflags
|= VA_MARK_ATIME
;
4185 (void)VOP_SETATTR(vp
, &atimeattr
, cred
);