/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear and bandwidth for the swap device.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
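
/*
 * Illustrative note (added commentary, not part of the original file):
 * the knobs below are exported under the vm.swapcache sysctl node, so a
 * typical setup on a swap-on-SSD machine might be:
 *
 *      sysctl vm.swapcache.data_enable=1
 *      sysctl vm.swapcache.meta_enable=1
 *
 * With vm.swapcache.use_chflags left at its default of 1, only vnodes
 * carrying the chflags(1) "cache" flag (VSWAPCACHE) are eligible for
 * data caching.
 */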
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
#define INACTIVE_LIST   (&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel process "swapcached" */
static int vm_swapcached_flush(vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;
SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);
int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;        /* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;      /* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;    /* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;    /* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;         /* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
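
/*
 * Added commentary (not in the original source): the burst variables
 * implement a token-bucket style write budget.  At the ~10hz polling
 * rate the daemon credits curburst with accrate/10 bytes per poll
 * (+10KB per tick at the default 100K/s), capped at maxburst, and each
 * flush debits the bytes written.  Writing pauses when curburst runs
 * out and resumes only after at least minburst has re-accumulated.
 */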
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
        CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
        CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
        CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
        CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
        CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
        CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
        CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
        CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
        CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
        CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
        CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
        CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
        CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
#define SWAPMAX(adj)    \
        ((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
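
/*
 * Worked example (added commentary): with the default maxswappct of 75,
 * SWAPMAX(0) evaluates to 75% of vm_swap_max and SWAPMAX(-5) to 70%,
 * giving the state machine below a 5% hysteresis band between starting
 * a cleaning pass and resuming writing.
 */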
/*
 * vm_swapcached is the high level pageout daemon.
 *
 * The caller must hold vm_token.
 */
static void
vm_swapcached_thread(void)
{
        enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
        enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
        struct vm_page page_marker;
        struct vm_object object_marker;
        /*
         * Thread setup
         */
        curthread->td_flags |= TDF_SYSTHREAD;

        lwkt_gettoken(&vm_token);

        /*
         * Initialize our marker for the inactive scan (SWAPC_WRITING)
         */
        bzero(&page_marker, sizeof(page_marker));
        page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
        page_marker.queue = PQ_INACTIVE;
        page_marker.wire_count = 1;
        TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
        vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
        vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

        /*
         * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
         */
        bzero(&object_marker, sizeof(object_marker));
        object_marker.type = OBJT_MARKER;
        lwkt_gettoken(&vmobj_token);
        TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
        lwkt_reltoken(&vmobj_token);
        for (;;) {
                /*
                 * Check every 5 seconds when not enabled or if no swap
                 * is present.
                 */
                if ((vm_swapcache_data_enable == 0 &&
                     vm_swapcache_meta_enable == 0) ||
                    vm_swap_max == 0) {
                        tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
                        continue;
                }

                /*
                 * Polling rate when enabled is approximately 10 hz.
                 */
                tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

                /*
                 * State hysteresis.  Generate write activity up to 75% of
                 * swap, then clean out swap assignments down to 70%, then
                 * repeat.
                 */
                if (state == SWAPC_WRITING) {
                        if (vm_swap_cache_use > SWAPMAX(0))
                                state = SWAPC_CLEANING;
                } else {
                        if (vm_swap_cache_use < SWAPMAX(-5))
                                state = SWAPC_WRITING;
                }

                /*
                 * We are allowed to continue accumulating burst value
                 * in either state.  Allow the user to set curburst > maxburst
                 * for the initial load-in.
                 */
                if (vm_swapcache_curburst < vm_swapcache_maxburst) {
                        vm_swapcache_curburst += vm_swapcache_accrate / 10;
                        if (vm_swapcache_curburst > vm_swapcache_maxburst)
                                vm_swapcache_curburst = vm_swapcache_maxburst;
                }

                /*
                 * We don't want to nickel-and-dime the scan as that will
                 * create unnecessary fragmentation.  The minimum burst
                 * is one second's worth of accumulation.
                 */
                if (state == SWAPC_WRITING) {
                        if (vm_swapcache_curburst >= vm_swapcache_accrate) {
                                if (burst == SWAPB_BURSTING) {
                                        vm_swapcache_writing(&page_marker);
                                        if (vm_swapcache_curburst <= 0)
                                                burst = SWAPB_RECOVERING;
                                } else if (vm_swapcache_curburst >
                                           vm_swapcache_minburst) {
                                        vm_swapcache_writing(&page_marker);
                                        burst = SWAPB_BURSTING;
                                }
                        }
                } else {
                        vm_swapcache_cleaning(&object_marker);
                }
        }
        /*
         * Cleanup (NOT REACHED)
         */
        TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);

        lwkt_reltoken(&vm_token);

        lwkt_gettoken(&vmobj_token);
        TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
        lwkt_reltoken(&vmobj_token);
}
static struct kproc_desc swpc_kp = {
        "swapcached",
        vm_swapcached_thread,
        &swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)
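
/*
 * Added commentary: the SYSINIT hook above runs kproc_start(&swpc_kp)
 * during the SI_SUB_KTHREAD_PAGE stage of boot, creating a kernel
 * thread named "swapcached" that executes vm_swapcached_thread() and
 * storing its thread pointer in swapcached_thread.
 */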
/*
 * The caller must hold vm_token.
 */
static void
vm_swapcache_writing(vm_page_t marker)
{
        vm_object_t object;
        struct vnode *vp;
        vm_page_t m;
        int count;
        int isblkdev;

        /*
         * Deal with an overflow of the heuristic counter or if the user
         * manually changes the hysteresis.
         *
         * Try to avoid small incremental pageouts by waiting for enough
         * pages to build up in the inactive queue to hopefully get a good
         * burst in.  This heuristic is bumped by the VM system and reset
         * when our scan hits the end of the queue.
         */
        if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
                vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
        if (vm_swapcache_inactive_heuristic < 0)
                return;
        /*
         * Scan the inactive queue from our marker to locate
         * suitable pages to push to the swap cache.
         *
         * We are looking for clean vnode-backed pages.
         *
         * NOTE: PG_SWAPPED pages in particular are not part of
         *       our count because once the cache stabilizes we
         *       can end up with a very high datarate of VM pages
         *       cycling through.
         */
        count = vm_swapcache_maxlaunder;
        m = marker;

        while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
                if (m->flags & (PG_MARKER | PG_SWAPPED)) {
                        ++count;
                        continue;
                }
                if (vm_swapcache_curburst < 0)
                        break;
                if (vm_swapcache_test(m))
                        continue;
                object = m->object;
                vp = object->handle;
                if (vp == NULL)
                        continue;

                switch(vp->v_type) {
                case VREG:
                        /*
                         * If data_enable is 0 do not try to swapcache data.
                         * If use_chflags is set then only swapcache data for
                         * VSWAPCACHE marked vnodes, otherwise any vnode.
                         */
                        if (vm_swapcache_data_enable == 0 ||
                            ((vp->v_flag & VSWAPCACHE) == 0 &&
                             vm_swapcache_use_chflags)) {
                                continue;
                        }
                        if (vm_swapcache_maxfilesize &&
                            object->size >
                            (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
                                continue;
                        }
                        isblkdev = 0;
                        break;
                case VCHR:
                        /*
                         * The PG_NOTMETA flag only applies to pages
                         * associated with block devices.
                         */
                        if (m->flags & PG_NOTMETA)
                                continue;
                        if (vm_swapcache_meta_enable == 0)
                                continue;
                        isblkdev = 1;
                        break;
                default:
                        continue;
                }

                /*
                 * Ok, move the marker and soft-busy the page.
                 */
                TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
                TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

                /*
                 * Assign swap and initiate I/O.
                 *
                 * (adjust for the --count which also occurs in the loop)
                 */
                count -= vm_swapcached_flush(m, isblkdev) - 1;
                /*
                 * Setup for next loop using marker.
                 */
                m = marker;
        }

        /*
         * Cleanup marker position.  If we hit the end of the
         * list the marker is placed at the tail.  Newly deactivated
         * pages will be placed after it.
         *
         * Earlier inactive pages that were dirty and become clean
         * are typically moved to the end of PQ_INACTIVE by virtue
         * of vfs_vmio_release() when they become unwired from the
         * buffer cache.
         */
        TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
        if (m) {
                TAILQ_INSERT_BEFORE(m, marker, pageq);
        } else {
                TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
                vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
        }
}
/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1).
 *
 * The caller must hold vm_token.
 */
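
/*
 * Worked example (added commentary, assuming SWAP_META_PAGES is 16): a
 * page at pindex 37 gives x = 37 & SWAP_META_MASK = 5, so the candidate
 * cluster spans pindex 32..47.  The two loops below then scan downward
 * from index 4 and upward from index 6, stopping at the first page in
 * each direction that is missing or unsuitable.
 */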
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
        vm_object_t object;
        vm_page_t marray[SWAP_META_PAGES];
        vm_pindex_t basei;
        int rtvals[SWAP_META_PAGES];
        int x;
        int i;
        int j;
        int count;

        vm_page_io_start(m);
        vm_page_protect(m, VM_PROT_READ);
        object = m->object;

        /*
         * Try to cluster around (m), keeping in mind that the swap pager
         * can only do SWAP_META_PAGES worth of contiguous write.
         */
        x = (int)m->pindex & SWAP_META_MASK;
        marray[x] = m;
        basei = m->pindex;

        for (i = x - 1; i >= 0; --i) {
                m = vm_page_lookup(object, basei - x + i);
                if (m == NULL)
                        break;
                if (vm_swapcache_test(m))
                        break;
                if (isblkdev && (m->flags & PG_NOTMETA))
                        break;
                vm_page_io_start(m);
                vm_page_protect(m, VM_PROT_READ);
                if (m->queue - m->pc == PQ_CACHE) {
                        vm_page_unqueue_nowakeup(m);
                        vm_page_deactivate(m);
                }
                marray[i] = m;
        }
        ++i;

        for (j = x + 1; j < SWAP_META_PAGES; ++j) {
                m = vm_page_lookup(object, basei - x + j);
                if (m == NULL)
                        break;
                if (vm_swapcache_test(m))
                        break;
                if (isblkdev && (m->flags & PG_NOTMETA))
                        break;
                vm_page_io_start(m);
                vm_page_protect(m, VM_PROT_READ);
                if (m->queue - m->pc == PQ_CACHE) {
                        vm_page_unqueue_nowakeup(m);
                        vm_page_deactivate(m);
                }
                marray[j] = m;
        }

        count = j - i;
        vm_object_pip_add(object, count);
        swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
        vm_swapcache_write_count += count * PAGE_SIZE;
        vm_swapcache_curburst -= count * PAGE_SIZE;

        while (i < j) {
                if (rtvals[i] != VM_PAGER_PEND) {
                        vm_page_io_finish(marray[i]);
                        vm_object_pip_wakeup(object);
                }
                ++i;
        }
        return(count);
}
/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure.
 *
 * The caller must hold vm_token.
 */
static
int
vm_swapcache_test(vm_page_t m)
{
        vm_object_t object;

        if (m->flags & (PG_BUSY | PG_UNMANAGED))
                return(1);
        if (m->busy || m->hold_count || m->wire_count)
                return(1);
        if (m->valid != VM_PAGE_BITS_ALL)
                return(1);
        if (m->dirty & m->valid)
                return(1);
        if ((object = m->object) == NULL)
                return(1);
        if (object->type != OBJT_VNODE ||
            (object->flags & OBJ_DEAD)) {
                return(1);
        }
        vm_page_test_dirty(m);
        if (m->dirty & m->valid)
                return(1);
        return(0);
}
/*
 * The caller must hold vm_token.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
        vm_object_t object;
        struct vnode *vp;
        int count;
        int n;

        object = marker;
        count = vm_swapcache_maxlaunder;

        /*
         * Look for vnode objects
         */
        lwkt_gettoken(&vm_token);
        lwkt_gettoken(&vmobj_token);

        while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
                if (object->type != OBJT_VNODE)
                        continue;
                if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
                        continue;
                if ((vp = object->handle) == NULL)
                        continue;
                if (vp->v_type != VREG && vp->v_type != VCHR)
                        continue;

                /*
                 * Reset the iterator if the marker moved to a new object.
                 */
                if (marker->backing_object != object)
                        marker->size = 0;

                /*
                 * Move the marker so we can work on the VM object
                 */
                TAILQ_REMOVE(&vm_object_list, marker, object_list);
                TAILQ_INSERT_AFTER(&vm_object_list, object,
                                   marker, object_list);

                /*
                 * Look for swblocks starting at our iterator.
                 *
                 * The swap_pager_condfree() function attempts to free
                 * swap space starting at the specified index.  The index
                 * will be updated on return.  The function will return
                 * a scan factor (NOT the number of blocks freed).
                 *
                 * If it must cut its scan of the object short due to an
                 * excessive number of swblocks, or is able to free the
                 * requested number of blocks, it will return n >= count
                 * and we break and pick it back up on a future attempt.
                 */
                n = swap_pager_condfree(object, &marker->size, count);
                count -= n;
                if (count < 0)
                        break;

                /*
                 * Setup for loop.
                 */
                marker->size = 0;
                object = marker;
        }

        /*
         * Adjust marker so we continue the scan from where we left off.
         * When we reach the end we start back at the beginning.
         */
        TAILQ_REMOVE(&vm_object_list, marker, object_list);
        if (object)
                TAILQ_INSERT_BEFORE(object, marker, object_list);
        else
                TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
        marker->backing_object = object;

        lwkt_reltoken(&vmobj_token);
        lwkt_reltoken(&vm_token);
}