/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
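/*
 * Illustrative usage (the sysctl names follow from the declarations
 * below; the values are only an example):
 *
 *	sysctl vm.swapcache.data_enable=1	# swapcache file data
 *	sysctl vm.swapcache.meta_enable=1	# swapcache filesystem meta-data
 *
 * With use_chflags left at its default of 1, data caching is further
 * restricted to vnodes marked with the chflags(1) 'cache' flag.
 */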
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>
/* the kernel process "swapcached" */
static int vm_swapcached_flush(vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static int vm_swapcache_writing_heuristic(void);
static int vm_swapcache_writing(vm_page_t marker, int count, int scount);
static void vm_swapcache_cleaning(vm_object_t marker);
static void vm_swapcache_movemarker(vm_object_t marker, vm_object_t object);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);
int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxscan = PQ_L2_SIZE * 8;
static int vm_swapcache_maxlaunder = PQ_L2_SIZE * 4;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_min_hysteresis;
int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
static int64_t vm_swapcache_cleanperobj = 16*1024*1024;
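/*
 * The int64_t burst/accounting tunables above are byte counts: the write
 * path below debits curburst and credits write_count by count * PAGE_SIZE,
 * so the default accrate of 100000 replenishes roughly 100KB/sec of write
 * budget.
 */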
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxscan,
	CTLFLAG_RW, &vm_swapcache_maxscan, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RD, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, min_hysteresis,
	CTLFLAG_RW, &vm_swapcache_min_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, cleanperobj,
	CTLFLAG_RW, &vm_swapcache_cleanperobj, 0, "");
#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
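/*
 * Worked example (illustrative numbers): with maxswappct at its default
 * of 75 and vm_swap_max at, say, 1000000 blocks, SWAPMAX(0) evaluates to
 * 750000 (75%) and SWAPMAX(-10) to 650000 (65%).  These two values form
 * the hysteresis band used by the state machine in vm_swapcached_thread().
 */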
/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep);	/* shortcut 5-second wait */
}
/*
 * vm_swapcached is the high level pageout daemon.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	static struct vm_page page_marker[PQ_L2_SIZE];
	static struct vm_object object_marker;
	int q;

	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		page_marker[q].flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		page_marker[q].queue = PQ_INACTIVE + q;
		page_marker[q].pc = q;
		page_marker[q].wire_count = 1;
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_INSERT_HEAD(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	vm_swapcache_min_hysteresis = 1024;
	vm_swapcache_hysteresis = vm_swapcache_min_hysteresis;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);

	for (;;) {
		int reached_end;
		int count;
		int scount;

		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 Hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);
		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 65%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-10))
				state = SWAPC_WRITING;
		}
		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
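		/*
		 * Worked example with the defaults above: accrate is
		 * 100000 bytes/sec and this loop polls at roughly 10Hz,
		 * so each pass credits about 10000 bytes of write budget,
		 * saturating at maxburst (nominally 2GB).
		 */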
		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state != SWAPC_WRITING) {
			vm_swapcache_cleaning(&object_marker);
			continue;
		}
		if (vm_swapcache_curburst < vm_swapcache_accrate)
			continue;

		reached_end = 0;
		count = vm_swapcache_maxlaunder / PQ_L2_SIZE + 2;
		scount = vm_swapcache_maxscan / PQ_L2_SIZE + 2;
		if (burst == SWAPB_BURSTING) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count, scount);
				}
			}
			if (vm_swapcache_curburst <= 0)
				burst = SWAPB_RECOVERING;
		} else if (vm_swapcache_curburst > vm_swapcache_minburst) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count, scount);
				}
			}
			burst = SWAPB_BURSTING;
		}
		if (reached_end == PQ_L2_SIZE) {
			vm_swapcache_inactive_heuristic =
				-vm_swapcache_hysteresis;
		}
	}
	/*
	 * Cleanup (NOT REACHED)
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_REMOVE(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);
}
static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);
/*
 * Deal with an overflow of the heuristic counter or if the user
 * manually changes the hysteresis.
 *
 * Try to avoid small incremental pageouts by waiting for enough
 * pages to build up in the inactive queue to hopefully get a good
 * burst in.  This heuristic is bumped by the VM system and reset
 * when our scan hits the end of the queue.
 *
 * Return TRUE if we need to take a writing pass.
 */
static int
vm_swapcache_writing_heuristic(void)
{
	int hyst;

	hyst = vmstats.v_inactive_count / 4;
	if (hyst < vm_swapcache_min_hysteresis)
		hyst = vm_swapcache_min_hysteresis;
	vm_swapcache_hysteresis = hyst;

	if (vm_swapcache_inactive_heuristic < -hyst)
		vm_swapcache_inactive_heuristic = -hyst;

	return (vm_swapcache_inactive_heuristic >= 0);
}
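/*
 * Example of the heuristic in action (illustrative numbers): with
 * 200000 pages in the inactive queues, hyst becomes 50000.  The counter
 * is reset to -hyst when a scan reaches the end of a queue and is bumped
 * by the VM system as pages are deactivated, so the next writing pass
 * begins only after roughly 50000 pages have accumulated.
 */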
/*
 * Take a writing pass on one of the inactive queues, return non-zero if
 * we hit the end of the queue.
 */
static int
vm_swapcache_writing(vm_page_t marker, int count, int scount)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int isblkdev;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 */
	vm_page_queues_spin_lock(marker->queue);
	while ((m = TAILQ_NEXT(marker, pageq)) != NULL &&
	       count > 0 && scount-- > 0) {
		KKASSERT(m->queue == marker->queue);

		if (vm_swapcache_curburst < 0)
			break;
		TAILQ_REMOVE(
			&vm_page_queues[marker->queue].pl, marker, pageq);
		TAILQ_INSERT_AFTER(
			&vm_page_queues[marker->queue].pl, m, marker, pageq);

		/*
		 * Ignore markers and ignore pages that already have a swap
		 * assignment.
		 */
		if (m->flags & (PG_MARKER | PG_SWAPPED))
			continue;
		if (vm_page_busy_try(m, TRUE))
			continue;
		vm_page_queues_spin_unlock(marker->queue);

		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		vm_object_hold(object);
		if (m->object != object) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		if (vm_swapcache_test(m)) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		vp = object->handle;
		if (vp == NULL) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}

			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_meta_enable == 0) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 1;
			break;
		default:
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev);

		/*
		 * Setup for next loop using marker.
		 */
		vm_object_drop(object);
		vm_page_queues_spin_lock(marker->queue);
	}

	/*
	 * The marker could wind up at the end, which is ok.  If we hit the
	 * end of the list adjust the heuristic.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	vm_page_queues_spin_unlock(marker->queue);

	/*
	 * m invalid but can be used to test for NULL
	 */
	return (m == NULL);
}
/*
 * Flush the specified page using the swap_pager.  The page
 * must be busied by the caller and its disposition will become
 * the responsibility of this function.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;
	int error;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;
	vm_object_hold(object);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	vm_page_wakeup(m);
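	/*
	 * Worked example (assuming SWAP_META_PAGES is 16, per the comment
	 * above): a page at pindex 37 gives x = 37 & 15 = 5 and basei = 37,
	 * so the backward scan below probes pindexes 36 down to 32 and the
	 * forward scan probes 38 up to 47, i.e. the aligned block [32,47]
	 * around m.
	 */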
	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup_busy_try(object, basei - x + i,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		vm_page_wakeup(m);
	}
	++i;
	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup_busy_try(object, basei - x + j,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		vm_page_wakeup(m);
	}
	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(marray[i], FALSE, "swppgfd");
			vm_page_io_finish(marray[i]);
			vm_page_wakeup(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	vm_object_drop(object);
	return (count);
}
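/*
 * Completion note (expected swap pager behavior, not spelled out in this
 * file): rtvals[] slots left at VM_PAGER_PEND are completing
 * asynchronously and the pager finishes the I/O and releases those pages
 * itself, which is why the loop above only finishes the non-PEND entries
 * synchronously.
 */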
/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & PG_UNMANAGED)
		return (1);
	if (m->hold_count || m->wire_count)
		return (1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return (1);
	if (m->dirty & m->valid)
		return (1);
	if ((object = m->object) == NULL)
		return (1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return (1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return (1);
	return (0);
}
/*
 * Cleaning pass.
 *
 * We clean whole objects up to 16MB
 */
static void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int scount;
	int n;

	count = vm_swapcache_maxlaunder;
	scount = vm_swapcache_maxscan;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vmobj_token);

	while ((object = TAILQ_NEXT(marker, object_list)) != NULL) {
		/*
		 * We have to skip markers.  We cannot hold/drop marker
		 * objects!
		 */
		if (object->type == OBJT_MARKER) {
			vm_swapcache_movemarker(marker, object);
			continue;
		}

		/*
		 * Safety, or in case there are millions of VM objects
		 * without swapcache backing.
		 */
		if (--scount <= 0)
			break;

		/*
		 * We must hold the object before potentially yielding.
		 */
		vm_object_hold(object);
		lwkt_yield();

		/*
		 * Only operate on live VNODE objects that are either
		 * VREG or VCHR (VCHR for meta-data).
		 */
		if ((object->type != OBJT_VNODE) ||
		    ((object->flags & OBJ_DEAD) ||
		     object->swblock_count == 0) ||
		    ((vp = object->handle) == NULL) ||
		    (vp->v_type != VREG && vp->v_type != VCHR)) {
			vm_object_drop(object);
			/* object may be invalid now */
			vm_swapcache_movemarker(marker, object);
			continue;
		}

		/*
		 * Reset the object pindex stored in the marker if the
		 * working object has changed.
		 */
		if (marker->backing_object != object) {
			marker->size = 0;
			marker->backing_object_offset = 0;
			marker->backing_object = object;
		}

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 *
		 * Scan the object linearly and try to batch large sets of
		 * blocks that are likely to clean out entire swap radix
		 * tree leafs.
		 */
		lwkt_reltoken(&vmobj_token);

		n = swap_pager_condfree(object, &marker->size,
					(count + SWAP_META_MASK) &
					~SWAP_META_MASK);
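		/*
		 * The mask arithmetic above rounds the request up to a
		 * whole SWAP_META_PAGES multiple (e.g. with SWAP_META_MASK
		 * at 15, a count of 100 becomes (100 + 15) & ~15 = 112),
		 * which encourages condfree to clean out radix-tree
		 * swblocks whole rather than leaving partial leafs behind.
		 */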
		vm_object_drop(object);		/* object may be invalid now */
		lwkt_gettoken(&vmobj_token);

		/*
		 * If we have exhausted the object or deleted our per-pass
		 * page limit then move us to the next object.  Note that
		 * the current object may no longer be on the vm_object_list.
		 */
		if (n <= 0 ||
		    marker->backing_object_offset > vm_swapcache_cleanperobj) {
			vm_swapcache_movemarker(marker, object);
		}

		/*
		 * If we have exhausted our max-launder stop for now.
		 */
		count -= n;
		marker->backing_object_offset += n * PAGE_SIZE;
		if (count < 0)
			break;
	}

	/*
	 * If we wound up at the end of the list this will move the
	 * marker back to the beginning.
	 */
	if (object == NULL)
		vm_swapcache_movemarker(marker, NULL);

	lwkt_reltoken(&vmobj_token);
}
/*
 * Move the marker past the current object.  Object can be stale, but we
 * still need it to determine if the marker has to be moved.  If the object
 * is still the 'current object' (object after the marker), we hop-scotch
 * the marker past it.
 */
static void
vm_swapcache_movemarker(vm_object_t marker, vm_object_t object)
{
	if (TAILQ_NEXT(marker, object_list) == object) {
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		if (object) {
			TAILQ_INSERT_AFTER(&vm_object_list, object,
					   marker, object_list);
		} else {
			TAILQ_INSERT_HEAD(&vm_object_list,
					  marker, object_list);
		}
	}
}