/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "swapcached" */
static void vm_swapcached(void);
static int vm_swapcached_flush(vm_page_t m);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
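
/*
 * Example (illustrative): the MIB names follow from the SYSCTL_NODE and
 * SYSCTL_INT/SYSCTL_QUAD declarations above, so enabling the cache for
 * both file data and filesystem meta-data from userland looks like:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.meta_enable=1
 */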

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
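
/*
 * Example: with the default vm_swapcache_maxswappct of 75, SWAPMAX(0)
 * evaluates to 75% of vm_swap_max and SWAPMAX(-5) to 70%.  These are the
 * two watermarks used by the write/clean state hysteresis in
 * vm_swapcached() below.
 */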

/*
 * vm_swapcached is the high level pageout daemon.
 */
static void
vm_swapcached(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	curthread->td_flags |= TDF_SYSTHREAD;

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
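
	/*
	 * The heuristic starts out negative (-hysteresis).  The VM system
	 * bumps it as pages are deactivated and the writing pass only
	 * proceeds once it reaches zero, which batches pageouts into
	 * reasonably sized bursts.
	 */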

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);

	for (;;) {
		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
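
		/*
		 * Note: at the ~10hz poll rate, adding accrate/10 per pass
		 * accumulates roughly vm_swapcache_accrate bytes of write
		 * budget per second (100KB/sec by default).
		 */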

		/*
		 * We don't want to nickle-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}

	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
}

static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;

	/*
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high datarate of VM pages
	 *	 cycling through the inactive queue.
	 */
	m = marker;
	count = vm_swapcache_maxlaunder;

	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			break;
		case VCHR:
			if (vm_swapcache_meta_enable == 0)
				continue;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m)
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	else
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
}

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static int
vm_swapcached_flush(vm_page_t m)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	object = m->object;
	basei = m->pindex;
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
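
	/*
	 * Example: with SWAP_META_PAGES of 16 (typical), a page at
	 * pindex 35 has x == 3, so the aligned cluster spans pindexes
	 * 32..47 and (basei - x + i) indexes page i of that cluster.
	 */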

	/*
	 * Scan backwards from x to pick up contiguous, suitable pages.
	 */
	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
	}
	++i;	/* i is now the base index of the contiguous run */

	/*
	 * Scan forwards from x; j ends one past the end of the run.
	 */
	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	/*
	 * Pages whose write did not remain pending are finished here;
	 * a VM_PAGER_PEND result means the pager's asynchronous I/O
	 * completion will finish the page instead.
	 */
	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}
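
/*
 * Sizing note: assuming the typical SWAP_META_PAGES of 16 and a 4K
 * PAGE_SIZE, a single flush writes at most 64KB of naturally aligned,
 * contiguous data to the swap device.
 */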

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & (PG_BUSY | PG_UNMANAGED | PG_NOTMETA))
		return(1);
	if (m->busy || m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		vm_page_protect(m, VM_PROT_NONE);
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

static void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

	/*
	 * Look for vnode objects
	 */
	while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Reset the swblock iterator if the marker has moved to
		 * a different object.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		count -= n;
		if (count < 0)
			break;

		/*
		 * Setup for the next loop iteration, continuing the
		 * object scan from the marker.
		 */
		marker->size = 0;
		object = marker;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;
}