/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");
LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT		(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE		(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;
};

static void	syncer_thread(void *);
static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return(0);
}
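/*
 * Example: the delay can be tuned from userland via the sysctl declared
 * above; values written through this handler are clamped to the range
 * [1, SYNCER_MAXDELAY]:
 *
 *	sysctl kern.syncdelay=10	stored as 10
 *	sysctl kern.syncdelay=60	stored as 32 (SYNCER_MAXDELAY)
 */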
/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
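/*
 * Worked example (illustrative numbers): with SYNCER_MAXDELAY = 32 the
 * hash has 32 buckets and syncer_mask = 31.  If syncer_delayno is
 * currently 20, a 15 second delay selects slot (20 + 15) & 31 = 3; the
 * syncer processes slots 21, 22, ..., 31, wraps to 0, and reaches slot
 * 3 again 15 ticks later.
 */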
/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&ctx->sc_token);
}
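/*
 * Note on the delay argument: a positive delay is relative to the
 * current scan position (and clamped to SYNCER_MAXDELAY - 2), while a
 * delay <= 0 selects the absolute slot (-delay & syncer_mask).  For
 * example, vsyncscan() below re-queues entries with
 * vn_syncer_add(vp, -(i + syncdelay)) to pin each vnode into a known
 * slot ahead of its scan position so the rescan does not immediately
 * revisit it.
 */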
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}
/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}
/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait till syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
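/*
 * Lifecycle sketch (illustrative; the call sites live in the VFS code,
 * not in this file): a filesystem that wants a dedicated syncer pairs
 * the two calls around the life of the mount:
 *
 *	vn_syncer_thr_create(mp);	at mount time, spawns "syncer%d"
 *	...
 *	vn_syncer_thr_stop(mp);		at unmount, waits for SC_FLAG_DONE
 */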
struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair
			 * shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
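
		/*
		 * Worked example (illustrative numbers): with syncdelay = 30,
		 * a burst of speedup_syncer() calls that pushes rushjob 20
		 * ahead of syncer_rushjob is clamped to 15 (syncdelay / 2).
		 * While delta remains nonzero, each loop pass sleeps only
		 * one tick instead of one second, draining roughly one
		 * queue slot per tick until the syncer catches up.
		 */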

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  wakeup() only does something
	 * real if a thread is actually sleeping on the channel.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp)
		wakeup(mp->mnt_syncer_ctx);
}
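/*
 * Usage note: each call asks the syncer(s) to absorb one extra second
 * of queued work.  A hypothetical caller that notices dirty-buffer
 * pressure might do:
 *
 *	speedup_syncer(mp);	also wakes that mount's syncer thread
 *	speedup_syncer(NULL);	just bumps rushjob for all syncers
 */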
/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;

	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFS's (aka NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
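/*
 * Worked example of the scatter logic above: start, incr and next are
 * all zero initially, so with SYNCER_MAXDELAY = 32 successive mounts
 * see next = 16, 8, 24, 4, 12, 20, 28, 2, 6, 10, ...  Each time the
 * sequence overflows 32 the stride is halved, spreading syncer vnodes
 * across the queue slots like a bit-reversed counter.
 */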
static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	if (ctx) {
		lwkt_gettoken(&ctx->sc_token);
		KKASSERT(vp->v_mount->mnt_syncer != vp);
		if (vp->v_flag & VONWORKLST) {
			LIST_REMOVE(vp, v_synclist);
			vclrflags(vp, VONWORKLST);
		}
		lwkt_reltoken(&ctx->sc_token);
	} else {
		KKASSERT((vp->v_flag & VONWORKLST) == 0);
	}

	return (0);
}
/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(
    struct mount *mp,
    int vmsc_flags,
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int count;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread.
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	i = ctx->syncer_delayno & ctx->syncer_mask;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;
	for (count = 0; count <= ctx->syncer_mask; ++count) {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}

			/*
			 * vp could be invalid.  However, if vp is still at
			 * the head of the list it is clearly valid and we
			 * can safely move it.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	}

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return(0);
}
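/*
 * Usage sketch (hypothetical callback, not part of this file): a VFS
 * sync implementation can walk only the dirty vnodes the syncer knows
 * about instead of every vnode on the mount:
 *
 *	static int
 *	examplefs_sync_scan(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		VOP_FSYNC(vp, MNT_NOWAIT, 0);
 *		return(0);
 *	}
 *
 *	vsyncscan(mp, VMSC_GETVP, examplefs_sync_scan, NULL);
 */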
/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}