/*
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/kcollect.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
#endif

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
#define SWBIO_TTC	0x08	/* for OBJPC_TRY_TO_CACHE */
struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

struct swswapoffinfo {
	vm_object_t	object;
	int		devidx;
	int		shared;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old swap code.
 */

int swap_pager_full;		/* swap space exhaustion (task killing) */
int swap_fail_ticks;		/* when we became exhausted */
int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis)*/
swblk_t vm_swap_cache_use;
swblk_t vm_swap_anon_use;
static int vm_report_swap_allocs;

static struct krate kswaprate = { 1 };
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static int swap_async_max = 4;		/* maximum in-progress async I/O's */
static int swap_burst_read = 0;		/* allow burst reading */
static swblk_t swapiterator;		/* linearize allocations */
int swap_user_async = 0;		/* user swap pager operation can be async */

static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin,
							   "swapbp_spin");

extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
#define BLK2DEVIDX(blk)	(nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0)
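/*
 * Illustrative example (hypothetical values, not defaults from this file):
 * with nswdev == 2 swap devices and SWB_DMMAX == 32, swap block 100 maps
 * to device (100 / 32) % 2 == 1; blocks are thus interleaved across the
 * configured devices in SWB_DMMAX-sized stripes.
 */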
SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
	CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");

#if SWBLK_BITS == 64
SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_free,
	CTLFLAG_RD, &vm_swap_size, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_max, 0, "");
#else
SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_free,
	CTLFLAG_RD, &vm_swap_size, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_max, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
	CTLFLAG_RW, &vm_report_swap_allocs, 0, "");
__read_mostly vm_zone_t swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);
static int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_pindex_t, vm_page_t *, int);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpage,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage	/* get backing store status for page	*/
};
/*
 * SWB_DMMAX is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  SWB_DMMAX is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */
static __inline void	swp_pager_freeswapspace(vm_object_t object,
					swblk_t blk, int npages);
static __inline swblk_t	swp_pager_getswapspace(vm_object_t object, int npages);

static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
			swap_fail_ticks = ticks;
		}
	} else {
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
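/*
 * Hysteresis example, using the initial nswap_lowat/nswap_hiwat defaults
 * of 128/512 pages declared above (swap_pager_newswap() later rescales
 * them to 4%/6% of total swap): the almost-full state latches once free
 * swap drops below 128 pages and clears only after it rises back above
 * 512 pages, so the warning does not flap near the threshold.
 */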
/*
 * Long-term data collection on 10-second interval.  Return the value
 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCAC.
 *
 * Return total swap in the scale field.  This can change if swap is
 * regularly added or removed and may cause some historical confusion
 * in that case, but SWAPPCT will always be historically accurate.
 */

#define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_swap_callback(int n)
{
	uint64_t total = vm_swap_max;
	uint64_t anon = vm_swap_anon_use;
	uint64_t cache = vm_swap_cache_use;

	if (total == 0)		/* avoid divide by zero */
		total = 1;
	kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
	kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
	kcollect_setscale(KCOLLECT_SWAPANO,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
	kcollect_setscale(KCOLLECT_SWAPCAC,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
	return (((anon + cache) * 10000 + (total >> 1)) / total);
}
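/*
 * The value returned above is swap utilization scaled to 1/100ths of a
 * percent, with the (total >> 1) term rounding to nearest.  For example,
 * anon + cache == 1 page out of total == 3 pages yields
 * (1 * 10000 + 1) / 3 == 3333, i.e. 33.33%.
 */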
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 *
 * Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
	kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
			  KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 *
 * Called from the low level boot code only.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf_kva + 1) / 2;
	nsw_wcount_sync = (nswbuf_kva + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
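	/*
	 * Illustrative sizing (hypothetical nswbuf_kva value, not a default
	 * set here): with nswbuf_kva == 256 pbufs the split above yields
	 * nsw_rcount == 128 read buffers, nsw_wcount_sync == 64 synchronous
	 * write buffers, and the fixed 4 asynchronous write buffers.
	 */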
	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 256GB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit("SWAPMETA",
				  sizeof(struct swblock),
				  n, ZONE_INTERRUPT);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	We only support unnamed objects.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	object = vm_object_allocate_hold(OBJT_DEFAULT,
					 OFF_TO_IDX(offset + PAGE_MASK + size));
	swp_pager_meta_convert(object);
	vm_object_drop(object);

	return (object);
}
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked or unreferenceable.
 *	No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_hold(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
/*
 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	The caller must hold the object.
 *	This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
	swblk_t blk;

	lwkt_gettoken(&vm_token);
	blk = blist_allocat(swapblist, npages, swapiterator);
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(swapblist, npages, 0);
	if (blk == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			if (vm_swap_max == 0) {
				krateprintf(&kswaprate,
					"Warning: The system would like to "
					"page to swap but no swap space "
					"is configured\n");
			} else {
				krateprintf(&kswaprate,
					"swap_pager_getswapspace: "
					"swap full allocating %d pages\n",
					npages);
			}
			swap_pager_full = 2;
		}
		if (swap_pager_almost_full == 0)
			swap_fail_ticks = ticks;
		swap_pager_almost_full = 1;
	} else {
		/* swapiterator = blk; disable for now, doesn't work well */
		swapacctspace(blk, -npages);
		if (object->type == OBJT_SWAP)
			vm_swap_anon_use += npages;
		else
			vm_swap_cache_use += npages;
	}
	lwkt_reltoken(&vm_token);

	return(blk);
}
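/*
 * Note on the two blist_allocat() calls above: the first attempt starts
 * the radix-tree search at the swap iterator hint to keep allocations
 * roughly linear on disk; only if that fails is the bitmap rescanned
 * from block 0 before the allocation is declared a failure.
 */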
/*
 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	This routine may not block.
 */
static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

	lwkt_gettoken(&vm_token);
	sp->sw_nused -= npages;
	if (object->type == OBJT_SWAP)
		vm_swap_anon_use -= npages;
	else
		vm_swap_cache_use -= npages;

	if (sp->sw_flags & SW_CLOSING) {
		lwkt_reltoken(&vm_token);
		return;
	}

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	lwkt_reltoken(&vm_token);
}
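/*
 * Note on the SW_CLOSING test above: blocks belonging to a device that
 * is being taken offline are accounted as released but are deliberately
 * not returned to the blist, so they cannot be handed out again while
 * the swapoff is in progress.
 */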
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	if (object->swblock_count == 0)
		return;
	vm_object_hold(object);
	swp_pager_meta_free(object, start, size);
	vm_object_drop(object);
}

void
swap_pager_freespace_all(vm_object_t object)
{
	if (object->swblock_count == 0)
		return;
	vm_object_hold(object);
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}
/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *	     callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
	struct swfreeinfo info;
	int n;
	int t;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	info.object = object;
	info.basei = *basei;	/* skip up to this page index */
	info.begi = count;	/* max swap pages to destroy */
	info.endi = count * 8;	/* max swblocks to scan */

	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
				swap_pager_condfree_callback, &info);
	*basei = info.basei;

	/*
	 * Take the higher difference swblocks vs pages
	 */
	n = count - (int)info.begi;
	t = count * 8 - (int)info.endi;
	if (n < t)
		n = t;

	return(n);
}
/*
 * The idea is to free whole meta-block to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present in the meta-block.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int i;

	/*
	 * Skip the whole meta-block if any of its pages are resident.
	 */
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		if (vm_page_lookup(object, swap->swb_index + i))
			break;
	}
	info->basei = swap->swb_index + SWAP_META_PAGES;
	if (i == SWAP_META_PAGES) {
		info->begi -= swap->swb_count;
		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
	}
	--info->endi;
	if ((int)info->begi < 0 || (int)info->endi < 0)
		return(-1);
	return(0);
}
/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * (m) must be busied by caller and remains busied on return.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
	if (m->object->swblock_count) {
		vm_object_hold(m->object);
		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
			vm_page_flag_set(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 *
 * The caller is responsible for avoiding races in the specified range.
 * No other requirements.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	swblk_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	vm_object_hold(object);

	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(object, n)) ==
			       SWAPBLK_NONE)
			{
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					vm_object_drop(object);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	vm_object_drop(object);

	return(0);
}
791 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
792 * and destroy the source.
794 * Copy any valid swapblks from the source to the destination. In
795 * cases where both the source and destination have a valid swapblk,
796 * we keep the destination's.
798 * This routine is allowed to block. It may block allocating metadata
799 * indirectly through swp_pager_meta_build() or if paging is still in
800 * progress on the source.
802 * XXX vm_page_collapse() kinda expects us not to block because we
803 * supposedly do not need to allocate memory, but for the moment we
804 * *may* have to get a little memory from the zone allocator, but
805 * it is taken from the interrupt memory. We should be ok.
807 * The source object contains no vm_page_t's (which is just as well)
808 * The source object is of type OBJT_SWAP.
810 * The source and destination objects must be held by the caller.
813 swap_pager_copy(vm_object_t srcobject
, vm_object_t dstobject
,
814 vm_pindex_t base_index
, int destroysource
)
818 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject
));
819 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject
));
822 * transfer source to destination.
824 for (i
= 0; i
< dstobject
->size
; ++i
) {
828 * Locate (without changing) the swapblk on the destination,
829 * unless it is invalid in which case free it silently, or
830 * if the destination is a resident page, in which case the
831 * source is thrown away.
833 dstaddr
= swp_pager_meta_ctl(dstobject
, i
, 0);
835 if (dstaddr
== SWAPBLK_NONE
) {
837 * Destination has no swapblk and is not resident,
842 srcaddr
= swp_pager_meta_ctl(srcobject
,
843 base_index
+ i
, SWM_POP
);
845 if (srcaddr
!= SWAPBLK_NONE
)
846 swp_pager_meta_build(dstobject
, i
, srcaddr
);
849 * Destination has valid swapblk or it is represented
850 * by a resident page. We destroy the sourceblock.
852 swp_pager_meta_ctl(srcobject
, base_index
+ i
, SWM_FREE
);
857 * Free left over swap blocks in source.
 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
 * double-remove the object from the swap queues.
864 * Reverting the type is not necessary, the caller is going
865 * to destroy srcobject directly, but I'm doing it here
866 * for consistency since we've removed the object from its
869 swp_pager_meta_free_all(srcobject
);
870 if (srcobject
->type
== OBJT_SWAP
)
871 srcobject
->type
= OBJT_DEFAULT
;
876 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
877 * the requested page.
879 * We determine whether good backing store exists for the requested
880 * page and return TRUE if it does, FALSE if it doesn't.
882 * If TRUE, we also try to determine how much valid, contiguous backing
883 * store exists before and after the requested page within a reasonable
884 * distance. We do not try to restrict it to the swap device stripe
885 * (that is handled in getpages/putpages). It probably isn't worth
891 swap_pager_haspage(vm_object_t object
, vm_pindex_t pindex
)
896 * do we have good backing store at the requested index ?
898 vm_object_hold(object
);
899 blk0
= swp_pager_meta_ctl(object
, pindex
, 0);
901 if (blk0
== SWAPBLK_NONE
) {
902 vm_object_drop(object
);
905 vm_object_drop(object
);
910 * Object must be held exclusive or shared by the caller.
913 swap_pager_haspage_locked(vm_object_t object
, vm_pindex_t pindex
)
915 if (swp_pager_meta_ctl(object
, pindex
, 0) == SWAPBLK_NONE
)
921 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
923 * This removes any associated swap backing store, whether valid or
924 * not, from the page. This operates on any VM object, not just OBJT_SWAP
927 * This routine is typically called when a page is made dirty, at
928 * which point any associated swap can be freed. MADV_FREE also
929 * calls us in a special-case situation
931 * NOTE!!! If the page is clean and the swap was valid, the caller
932 * should make the page dirty before calling this routine.
933 * This routine does NOT change the m->dirty status of the page.
934 * Also: MADV_FREE depends on it.
936 * The page must be busied.
937 * The caller can hold the object to avoid blocking, else we might block.
938 * No other requirements.
941 swap_pager_unswapped(vm_page_t m
)
943 if (m
->flags
& PG_SWAPPED
) {
944 vm_object_hold(m
->object
);
945 KKASSERT(m
->flags
& PG_SWAPPED
);
946 swp_pager_meta_ctl(m
->object
, m
->pindex
, SWM_FREE
);
947 vm_page_flag_clear(m
, PG_SWAPPED
);
948 vm_object_drop(m
->object
);
953 * SWAP_PAGER_STRATEGY() - read, write, free blocks
955 * This implements a VM OBJECT strategy function using swap backing store.
956 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
957 * types. Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other
958 * requests will return EINVAL.
960 * This is intended to be a cacheless interface (i.e. caching occurs at
961 * higher levels), and is also used as a swap-based SSD cache for vnode
962 * and device objects.
964 * All I/O goes directly to and from the swap device.
966 * We currently attempt to run I/O synchronously or asynchronously as
 * the caller requests.  This isn't perfect because we lose error
968 * sequencing when we run multiple ops in parallel to satisfy a request.
969 * But this is swap, so we let it all hang out.
971 * NOTE: This function supports the KVABIO API wherein bp->b_data might
972 * not be synchronized to the current cpu.
977 swap_pager_strategy(vm_object_t object
, struct bio
*bio
)
979 struct buf
*bp
= bio
->bio_buf
;
982 vm_pindex_t biox_blkno
= 0;
988 struct bio_track
*track
;
993 * tracking for swapdev vnode I/Os
995 if (bp
->b_cmd
== BUF_CMD_READ
)
996 track
= &swapdev_vp
->v_track_read
;
998 track
= &swapdev_vp
->v_track_write
;
1002 * Only supported commands
1004 if (bp
->b_cmd
!= BUF_CMD_FREEBLKS
&&
1005 bp
->b_cmd
!= BUF_CMD_READ
&&
1006 bp
->b_cmd
!= BUF_CMD_WRITE
) {
1007 bp
->b_error
= EINVAL
;
1008 bp
->b_flags
|= B_ERROR
| B_INVAL
;
1014 * bcount must be an integral number of pages.
1016 if (bp
->b_bcount
& PAGE_MASK
) {
1017 bp
->b_error
= EINVAL
;
1018 bp
->b_flags
|= B_ERROR
| B_INVAL
;
1020 kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
1021 "not page bounded\n",
1022 bp
, (long long)bio
->bio_offset
, (int)bp
->b_bcount
);
1027 * Clear error indication, initialize page index, count, data pointer.
1030 bp
->b_flags
&= ~B_ERROR
;
1031 bp
->b_resid
= bp
->b_bcount
;
1033 start
= (vm_pindex_t
)(bio
->bio_offset
>> PAGE_SHIFT
);
1034 count
= howmany(bp
->b_bcount
, PAGE_SIZE
);
1037 * WARNING! Do not dereference *data without issuing a bkvasync()
1042 * Deal with BUF_CMD_FREEBLKS
1044 if (bp
->b_cmd
== BUF_CMD_FREEBLKS
) {
1046 * FREE PAGE(s) - destroy underlying swap that is no longer
1049 vm_object_hold(object
);
1050 swp_pager_meta_free(object
, start
, count
);
1051 vm_object_drop(object
);
1058 * We need to be able to create a new cluster of I/O's. We cannot
1059 * use the caller fields of the passed bio so push a new one.
1061 * Because nbio is just a placeholder for the cluster links,
1062 * we can biodone() the original bio instead of nbio to make
1063 * things a bit more efficient.
1065 nbio
= push_bio(bio
);
1066 nbio
->bio_offset
= bio
->bio_offset
;
1067 nbio
->bio_caller_info1
.cluster_head
= NULL
;
1068 nbio
->bio_caller_info2
.cluster_tail
= NULL
;
1074 * Execute read or write
1076 vm_object_hold(object
);
1082 * Obtain block. If block not found and writing, allocate a
1083 * new block and build it into the object.
1085 blk
= swp_pager_meta_ctl(object
, start
, 0);
1086 if ((blk
== SWAPBLK_NONE
) && bp
->b_cmd
== BUF_CMD_WRITE
) {
1087 blk
= swp_pager_getswapspace(object
, 1);
1088 if (blk
== SWAPBLK_NONE
) {
1089 bp
->b_error
= ENOMEM
;
1090 bp
->b_flags
|= B_ERROR
;
1093 swp_pager_meta_build(object
, start
, blk
);
1097 * Do we have to flush our current collection? Yes if:
1099 * - no swap block at this index
1100 * - swap block is not contiguous
 *	- we cross a physical disk boundary in the
1105 (biox_blkno
+ btoc(bufx
->b_bcount
) != blk
||
1106 ((biox_blkno
^ blk
) & ~SWB_DMMASK
))) {
1109 ++mycpu
->gd_cnt
.v_swapin
;
1110 mycpu
->gd_cnt
.v_swappgsin
+=
1111 btoc(bufx
->b_bcount
);
1114 ++mycpu
->gd_cnt
.v_swapout
;
1115 mycpu
->gd_cnt
.v_swappgsout
+=
1116 btoc(bufx
->b_bcount
);
1117 bufx
->b_dirtyend
= bufx
->b_bcount
;
1125 * Finished with this buf.
1127 KKASSERT(bufx
->b_bcount
!= 0);
1128 if (bufx
->b_cmd
!= BUF_CMD_READ
)
1129 bufx
->b_dirtyend
= bufx
->b_bcount
;
1135 * Add new swapblk to biox, instantiating biox if necessary.
1136 * Zero-fill reads are able to take a shortcut.
1138 if (blk
== SWAPBLK_NONE
) {
1140 * We can only get here if we are reading.
1143 bzero(data
, PAGE_SIZE
);
1144 bp
->b_resid
-= PAGE_SIZE
;
1147 /* XXX chain count > 4, wait to <= 4 */
1149 bufx
= getpbuf(NULL
);
1150 bufx
->b_flags
|= B_KVABIO
;
1151 biox
= &bufx
->b_bio1
;
1152 cluster_append(nbio
, bufx
);
1153 bufx
->b_cmd
= bp
->b_cmd
;
1154 biox
->bio_done
= swap_chain_iodone
;
1155 biox
->bio_offset
= (off_t
)blk
<< PAGE_SHIFT
;
1156 biox
->bio_caller_info1
.cluster_parent
= nbio
;
1159 bufx
->b_data
= data
;
1161 bufx
->b_bcount
+= PAGE_SIZE
;
1168 vm_object_drop(object
);
1171 * Flush out last buffer
1174 if (bufx
->b_cmd
== BUF_CMD_READ
) {
1175 ++mycpu
->gd_cnt
.v_swapin
;
1176 mycpu
->gd_cnt
.v_swappgsin
+= btoc(bufx
->b_bcount
);
1178 ++mycpu
->gd_cnt
.v_swapout
;
1179 mycpu
->gd_cnt
.v_swappgsout
+= btoc(bufx
->b_bcount
);
1180 bufx
->b_dirtyend
= bufx
->b_bcount
;
1182 KKASSERT(bufx
->b_bcount
);
1183 if (bufx
->b_cmd
!= BUF_CMD_READ
)
1184 bufx
->b_dirtyend
= bufx
->b_bcount
;
1185 /* biox, bufx = NULL */
1189 * Now initiate all the I/O. Be careful looping on our chain as
1190 * I/O's may complete while we are still initiating them.
1192 * If the request is a 100% sparse read no bios will be present
1193 * and we just biodone() the buffer.
1195 nbio
->bio_caller_info2
.cluster_tail
= NULL
;
1196 bufx
= nbio
->bio_caller_info1
.cluster_head
;
1200 biox
= &bufx
->b_bio1
;
1202 bufx
= bufx
->b_cluster_next
;
1203 vn_strategy(swapdev_vp
, biox
);
1210 * Completion of the cluster will also call biodone_chain(nbio).
1211 * We never call biodone(nbio) so we don't have to worry about
1212 * setting up a bio_done callback. It's handled in the sub-IO.
1223 swap_chain_iodone(struct bio
*biox
)
1226 struct buf
*bufx
; /* chained sub-buffer */
1227 struct bio
*nbio
; /* parent nbio with chain glue */
1228 struct buf
*bp
; /* original bp associated with nbio */
1231 bufx
= biox
->bio_buf
;
1232 nbio
= biox
->bio_caller_info1
.cluster_parent
;
1236 * Update the original buffer
1238 KKASSERT(bp
!= NULL
);
1239 if (bufx
->b_flags
& B_ERROR
) {
1240 atomic_set_int(&bufx
->b_flags
, B_ERROR
);
1241 bp
->b_error
= bufx
->b_error
; /* race ok */
1242 } else if (bufx
->b_resid
!= 0) {
1243 atomic_set_int(&bufx
->b_flags
, B_ERROR
);
1244 bp
->b_error
= EINVAL
; /* race ok */
1246 atomic_subtract_int(&bp
->b_resid
, bufx
->b_bcount
);
1250 * Remove us from the chain.
1252 spin_lock(&swapbp_spin
);
1253 nextp
= &nbio
->bio_caller_info1
.cluster_head
;
1254 while (*nextp
!= bufx
) {
1255 KKASSERT(*nextp
!= NULL
);
1256 nextp
= &(*nextp
)->b_cluster_next
;
1258 *nextp
= bufx
->b_cluster_next
;
1259 chain_empty
= (nbio
->bio_caller_info1
.cluster_head
== NULL
);
1260 spin_unlock(&swapbp_spin
);
1263 * Clean up bufx. If the chain is now empty we finish out
1264 * the parent. Note that we may be racing other completions
1265 * so we must use the chain_empty status from above.
1268 if (bp
->b_resid
!= 0 && !(bp
->b_flags
& B_ERROR
)) {
1269 atomic_set_int(&bp
->b_flags
, B_ERROR
);
1270 bp
->b_error
= EINVAL
;
1272 biodone_chain(nbio
);
1274 relpbuf(bufx
, NULL
);
1278 * SWAP_PAGER_GETPAGES() - bring page in from swap
1280 * The requested page may have to be brought in from swap. Calculate the
1281 * swap block and bring in additional pages if possible. All pages must
1282 * have contiguous swap block assignments and reside in the same object.
1284 * The caller has a single vm_object_pip_add() reference prior to
1285 * calling us and we should return with the same.
1287 * The caller has BUSY'd the page. We should return with (*mpp) left busy,
 * and any additional pages unbusied.
1290 * If the caller encounters a PG_RAM page it will pass it to us even though
1291 * it may be valid and dirty. We cannot overwrite the page in this case!
1292 * The case is used to allow us to issue pure read-aheads.
1294 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
1295 * the PG_RAM page is validated at the same time as mreq. What we
1296 * really need to do is issue a separate read-ahead pbuf.
1301 swap_pager_getpage(vm_object_t object
, vm_pindex_t pindex
,
1302 vm_page_t
*mpp
, int seqaccess
)
1314 u_int32_t busy_count
;
1315 vm_page_t marray
[XIO_INTERNAL_PAGES
];
1319 vm_object_hold(object
);
1320 if (mreq
->object
!= object
) {
1321 panic("swap_pager_getpages: object mismatch %p/%p",
1328 * We don't want to overwrite a fully valid page as it might be
1329 * dirty. This case can occur when e.g. vm_fault hits a perfectly
1330 * valid page with PG_RAM set.
1332 * In this case we see if the next page is a suitable page-in
1333 * candidate and if it is we issue read-ahead. PG_RAM will be
1334 * set on the last page of the read-ahead to continue the pipeline.
1336 if (mreq
->valid
== VM_PAGE_BITS_ALL
) {
1337 if (swap_burst_read
== 0 || mreq
->pindex
+ 1 >= object
->size
) {
1338 vm_object_drop(object
);
1339 return(VM_PAGER_OK
);
1341 blk
= swp_pager_meta_ctl(object
, mreq
->pindex
+ 1, 0);
1342 if (blk
== SWAPBLK_NONE
) {
1343 vm_object_drop(object
);
1344 return(VM_PAGER_OK
);
1346 m
= vm_page_lookup_busy_try(object
, mreq
->pindex
+ 1,
1349 vm_object_drop(object
);
1350 return(VM_PAGER_OK
);
1351 } else if (m
== NULL
) {
1353 * Use VM_ALLOC_QUICK to avoid blocking on cache
1356 m
= vm_page_alloc(object
, mreq
->pindex
+ 1,
1359 vm_object_drop(object
);
1360 return(VM_PAGER_OK
);
1365 vm_object_drop(object
);
1366 return(VM_PAGER_OK
);
1368 vm_page_unqueue_nowakeup(m
);
1378 * Try to block-read contiguous pages from swap if sequential,
1379 * otherwise just read one page. Contiguous pages from swap must
1380 * reside within a single device stripe because the I/O cannot be
1381 * broken up across multiple stripes.
1383 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1384 * set up such that the case(s) are handled implicitly.
1386 blk
= swp_pager_meta_ctl(mreq
->object
, mreq
->pindex
, 0);
1389 for (i
= 1; i
<= swap_burst_read
&&
1390 i
< XIO_INTERNAL_PAGES
&&
1391 mreq
->pindex
+ i
< object
->size
; ++i
) {
1394 iblk
= swp_pager_meta_ctl(object
, mreq
->pindex
+ i
, 0);
1395 if (iblk
!= blk
+ i
)
1397 if ((blk
^ iblk
) & ~SWB_DMMASK
)
1399 m
= vm_page_lookup_busy_try(object
, mreq
->pindex
+ i
,
1403 } else if (m
== NULL
) {
1405 * Use VM_ALLOC_QUICK to avoid blocking on cache
1408 m
= vm_page_alloc(object
, mreq
->pindex
+ i
,
1417 vm_page_unqueue_nowakeup(m
);
1423 vm_page_flag_set(marray
[i
- 1], PG_RAM
);
1426 * If mreq is the requested page and we have nothing to do return
1427 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead
1428 * page and must be cleaned up.
1430 if (blk
== SWAPBLK_NONE
) {
1433 vnode_pager_freepage(mreq
);
1434 vm_object_drop(object
);
1435 return(VM_PAGER_OK
);
1437 vm_object_drop(object
);
1438 return(VM_PAGER_FAIL
);
1443 * Map our page(s) into kva for input
1445 * Use the KVABIO API to avoid synchronizing the pmap.
1447 bp
= getpbuf_kva(&nsw_rcount
);
1449 kva
= (vm_offset_t
) bp
->b_kvabase
;
1450 bcopy(marray
, bp
->b_xio
.xio_pages
, i
* sizeof(vm_page_t
));
1451 pmap_qenter_noinval(kva
, bp
->b_xio
.xio_pages
, i
);
1453 bp
->b_data
= (caddr_t
)kva
;
1454 bp
->b_bcount
= PAGE_SIZE
* i
;
1455 bp
->b_xio
.xio_npages
= i
;
1456 bp
->b_flags
|= B_KVABIO
;
1457 bio
->bio_done
= swp_pager_async_iodone
;
1458 bio
->bio_offset
= (off_t
)blk
<< PAGE_SHIFT
;
1459 bio
->bio_caller_info1
.index
= SWBIO_READ
;
1462 * Set index. If raonly set the index beyond the array so all
1463 * the pages are treated the same, otherwise the original mreq is
1467 bio
->bio_driver_info
= (void *)(intptr_t)i
;
1469 bio
->bio_driver_info
= (void *)(intptr_t)0;
1471 for (j
= 0; j
< i
; ++j
) {
1472 atomic_set_int(&bp
->b_xio
.xio_pages
[j
]->busy_count
,
1476 mycpu
->gd_cnt
.v_swapin
++;
1477 mycpu
->gd_cnt
.v_swappgsin
+= bp
->b_xio
.xio_npages
;
1480 * We still hold the lock on mreq, and our automatic completion routine
1481 * does not remove it.
1483 vm_object_pip_add(object
, bp
->b_xio
.xio_npages
);
1486 * perform the I/O. NOTE!!! bp cannot be considered valid after
1487 * this point because we automatically release it on completion.
1488 * Instead, we look at the one page we are interested in which we
1489 * still hold a lock on even through the I/O completion.
1491 * The other pages in our m[] array are also released on completion,
1492 * so we cannot assume they are valid anymore either.
1494 bp
->b_cmd
= BUF_CMD_READ
;
1496 vn_strategy(swapdev_vp
, bio
);
1499 * Wait for the page we want to complete. PBUSY_SWAPINPROG is always
1500 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1501 * is set in the meta-data.
1503 * If this is a read-ahead only we return immediately without
1507 vm_object_drop(object
);
1508 return(VM_PAGER_OK
);
1512 * Read-ahead includes originally requested page case.
1515 busy_count
= mreq
->busy_count
;
1517 if ((busy_count
& PBUSY_SWAPINPROG
) == 0)
1519 tsleep_interlock(mreq
, 0);
1520 if (!atomic_cmpset_int(&mreq
->busy_count
, busy_count
,
1522 PBUSY_SWAPINPROG
| PBUSY_WANTED
)) {
1525 atomic_set_int(&mreq
->flags
, PG_REFERENCED
);
1526 mycpu
->gd_cnt
.v_intrans
++;
1527 if (tsleep(mreq
, PINTERLOCKED
, "swread", hz
*20)) {
1529 "swap_pager: indefinite wait buffer: "
1530 " bp %p offset: %lld, size: %ld "
1531 " m=%p busy=%08x flags=%08x\n",
1533 (long long)bio
->bio_offset
,
1535 mreq
, mreq
->busy_count
, mreq
->flags
);
1540 * Disallow speculative reads prior to the SWAPINPROG test.
1545 * mreq is left busied after completion, but all the other pages
1546 * are freed. If we had an unrecoverable read error the page will
1549 vm_object_drop(object
);
1550 if (mreq
->valid
!= VM_PAGE_BITS_ALL
)
1551 return(VM_PAGER_ERROR
);
1553 return(VM_PAGER_OK
);
1556 * A final note: in a low swap situation, we cannot deallocate swap
1557 * and mark a page dirty here because the caller is likely to mark
1558 * the page clean when we return, causing the page to possibly revert
1559 * to all-zero's later.
1564 * swap_pager_putpages:
1566 * Assign swap (if necessary) and initiate I/O on the specified pages.
1568 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1569 * are automatically converted to SWAP objects.
1571 * In a low memory situation we may block in vn_strategy(), but the new
1572 * vm_page reservation system coupled with properly written VFS devices
1573 * should ensure that no low-memory deadlock occurs. This is an area
1576 * The parent has N vm_object_pip_add() references prior to
1577 * calling us and will remove references for rtvals[] that are
1578 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1581 * The parent has soft-busy'd the pages it passes us and will unbusy
 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1583 * We need to unbusy the rest on I/O completion.
1588 swap_pager_putpages(vm_object_t object
, vm_page_t
*m
, int count
,
1589 int flags
, int *rtvals
)
1594 vm_object_hold(object
);
1596 if (count
&& m
[0]->object
!= object
) {
1597 panic("swap_pager_getpages: object mismatch %p/%p",
1606 * Turn object into OBJT_SWAP
1607 * Check for bogus sysops
1609 * Force sync if not pageout process, we don't want any single
1610 * non-pageout process to be able to hog the I/O subsystem! This
 * can be overridden by setting vm.swap_user_async.
1613 if (object
->type
== OBJT_DEFAULT
) {
1614 if (object
->type
== OBJT_DEFAULT
)
1615 swp_pager_meta_convert(object
);
1619 * Normally we force synchronous swap I/O if this is not the
1620 * pageout daemon to prevent any single user process limited
1621 * via RLIMIT_RSS from hogging swap write bandwidth.
1623 if (curthread
!= pagethread
&&
1624 curthread
!= emergpager
&&
1625 swap_user_async
== 0) {
1626 flags
|= OBJPC_SYNC
;
1632 * Update nsw parameters from swap_async_max sysctl values.
1633 * Do not let the sysop crash the machine with bogus numbers.
1635 if (swap_async_max
!= nsw_wcount_async_max
) {
1641 if ((n
= swap_async_max
) > nswbuf_kva
/ 2)
1648 * Adjust difference ( if possible ). If the current async
1649 * count is too low, we may not be able to make the adjustment
1652 * vm_token needed for nsw_wcount sleep interlock
1654 lwkt_gettoken(&vm_token
);
1655 n
-= nsw_wcount_async_max
;
1656 if (nsw_wcount_async
+ n
>= 0) {
1657 nsw_wcount_async_max
+= n
;
1658 pbuf_adjcount(&nsw_wcount_async
, n
);
1660 lwkt_reltoken(&vm_token
);
1666 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1667 * The page is left dirty until the pageout operation completes
1671 for (i
= 0; i
< count
; i
+= n
) {
1678 * Maximum I/O size is limited by a number of factors.
1681 n
= min(BLIST_MAX_ALLOC
, count
- i
);
1682 n
= min(n
, nsw_cluster_max
);
1684 lwkt_gettoken(&vm_token
);
1687 * Get biggest block of swap we can. If we fail, fall
1688 * back and try to allocate a smaller block. Don't go
1689 * overboard trying to allocate space if it would overly
1693 (blk
= swp_pager_getswapspace(object
, n
)) == SWAPBLK_NONE
&&
1698 if (blk
== SWAPBLK_NONE
) {
1699 for (j
= 0; j
< n
; ++j
)
1700 rtvals
[i
+j
] = VM_PAGER_FAIL
;
1701 lwkt_reltoken(&vm_token
);
1704 if (vm_report_swap_allocs
> 0) {
1705 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk
, n
);
1706 --vm_report_swap_allocs
;
1710 * The I/O we are constructing cannot cross a physical
 * disk boundary in the swap stripe.
1713 if ((blk
^ (blk
+ n
)) & ~SWB_DMMASK
) {
1714 j
= ((blk
+ SWB_DMMAX
) & ~SWB_DMMASK
) - blk
;
1715 swp_pager_freeswapspace(object
, blk
+ j
, n
- j
);
1720 * All I/O parameters have been satisfied, build the I/O
1721 * request and assign the swap space.
1723 * Use the KVABIO API to avoid synchronizing the pmap.
1725 if ((flags
& OBJPC_SYNC
))
1726 bp
= getpbuf_kva(&nsw_wcount_sync
);
1728 bp
= getpbuf_kva(&nsw_wcount_async
);
1731 lwkt_reltoken(&vm_token
);
1733 pmap_qenter_noinval((vm_offset_t
)bp
->b_data
, &m
[i
], n
);
1735 bp
->b_flags
|= B_KVABIO
;
1736 bp
->b_bcount
= PAGE_SIZE
* n
;
1737 bio
->bio_offset
= (off_t
)blk
<< PAGE_SHIFT
;
1739 for (j
= 0; j
< n
; ++j
) {
1740 vm_page_t mreq
= m
[i
+j
];
1742 swp_pager_meta_build(mreq
->object
, mreq
->pindex
,
1744 if (object
->type
== OBJT_SWAP
)
1745 vm_page_dirty(mreq
);
1746 rtvals
[i
+j
] = VM_PAGER_OK
;
1748 atomic_set_int(&mreq
->busy_count
, PBUSY_SWAPINPROG
);
1749 bp
->b_xio
.xio_pages
[j
] = mreq
;
1751 bp
->b_xio
.xio_npages
= n
;
1753 mycpu
->gd_cnt
.v_swapout
++;
1754 mycpu
->gd_cnt
.v_swappgsout
+= bp
->b_xio
.xio_npages
;
1756 bp
->b_dirtyoff
= 0; /* req'd for NFS */
1757 bp
->b_dirtyend
= bp
->b_bcount
; /* req'd for NFS */
1758 bp
->b_cmd
= BUF_CMD_WRITE
;
1759 bio
->bio_caller_info1
.index
= SWBIO_WRITE
;
1764 if ((flags
& OBJPC_SYNC
) == 0) {
1765 bio
->bio_done
= swp_pager_async_iodone
;
1767 vn_strategy(swapdev_vp
, bio
);
1769 for (j
= 0; j
< n
; ++j
)
1770 rtvals
[i
+j
] = VM_PAGER_PEND
;
 * Issue synchronously.
1777 * Wait for the sync I/O to complete, then update rtvals.
1778 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1779 * our async completion routine at the end, thus avoiding a
1782 bio
->bio_caller_info1
.index
|= SWBIO_SYNC
;
1783 if (flags
& OBJPC_TRY_TO_CACHE
)
1784 bio
->bio_caller_info1
.index
|= SWBIO_TTC
;
1785 bio
->bio_done
= biodone_sync
;
1786 bio
->bio_flags
|= BIO_SYNC
;
1787 vn_strategy(swapdev_vp
, bio
);
1788 biowait(bio
, "swwrt");
1790 for (j
= 0; j
< n
; ++j
)
1791 rtvals
[i
+j
] = VM_PAGER_PEND
;
1794 * Now that we are through with the bp, we can call the
1795 * normal async completion, which frees everything up.
1797 swp_pager_async_iodone(bio
);
1799 vm_object_drop(object
);
/*
 * Recalculate the low and high-water marks.
 */
void
swap_pager_newswap(void)
{
	/*
	 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
	 *	 limitation imposed by the blist code.  Remember that this
	 *	 will be divided by NSWAP_MAX (4), so each swap device is
	 *	 limited to around a terabyte.
	 */
	nswap_lowat = (int64_t)vm_swap_max * 4 / 100;	/* 4% left */
	nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;	/* 6% left */
	kprintf("swap low/high-water marks set to %d/%d\n",
		nswap_lowat, nswap_hiwat);
}
1829 * swp_pager_async_iodone:
1831 * Completion routine for asynchronous reads and writes from/to swap.
1832 * Also called manually by synchronous code to finish up a bp.
1834 * For READ operations, the pages are BUSY'd. For WRITE operations,
1835 * the pages are vm_page_t->busy'd. For READ operations, we BUSY
1836 * unbusy all pages except the 'main' request page. For WRITE
1837 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this
1838 * because we marked them all VM_PAGER_PEND on return from putpages ).
1840 * This routine may not block.
1845 swp_pager_async_iodone(struct bio
*bio
)
1847 struct buf
*bp
= bio
->bio_buf
;
1848 vm_object_t object
= NULL
;
1855 if (bp
->b_flags
& B_ERROR
) {
1857 "swap_pager: I/O error - %s failed; offset %lld,"
1858 "size %ld, error %d\n",
1859 ((bio
->bio_caller_info1
.index
& SWBIO_READ
) ?
1860 "pagein" : "pageout"),
1861 (long long)bio
->bio_offset
,
1870 if (bp
->b_xio
.xio_npages
)
1871 object
= bp
->b_xio
.xio_pages
[0]->object
;
1874 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
1875 if (bio
->bio_caller_info1
.index
& SWBIO_WRITE
) {
1876 if (bio
->bio_crc
!= iscsi_crc32(bp
->b_data
, bp
->b_bcount
)) {
1877 kprintf("SWAPOUT: BADCRC %08x %08x\n",
1879 iscsi_crc32(bp
->b_data
, bp
->b_bcount
));
1880 for (i
= 0; i
< bp
->b_xio
.xio_npages
; ++i
) {
1881 vm_page_t m
= bp
->b_xio
.xio_pages
[i
];
1882 if ((m
->flags
& PG_WRITEABLE
) &&
1883 (pmap_mapped_sync(m
) & PG_WRITEABLE
)) {
1885 "%d/%d %p writable\n",
1886 i
, bp
->b_xio
.xio_npages
, m
);
1894 * remove the mapping for kernel virtual
1896 pmap_qremove((vm_offset_t
)bp
->b_data
, bp
->b_xio
.xio_npages
);
1899 * cleanup pages. If an error occurs writing to swap, we are in
1900 * very serious trouble. If it happens to be a disk error, though,
1901 * we may be able to recover by reassigning the swap later on. So
1902 * in this case we remove the m->swapblk assignment for the page
 * but do not free it in the rlist.  The erroneous block(s) are thus
1904 * never reallocated as swap. Redirty the page and continue.
1906 for (i
= 0; i
< bp
->b_xio
.xio_npages
; ++i
) {
1907 vm_page_t m
= bp
->b_xio
.xio_pages
[i
];
1909 if (bp
->b_flags
& B_ERROR
) {
1911 * If an error occurs I'd love to throw the swapblk
1912 * away without freeing it back to swapspace, so it
1913 * can never be used again. But I can't from an
1917 if (bio
->bio_caller_info1
.index
& SWBIO_READ
) {
1919 * When reading, reqpage needs to stay
1920 * locked for the parent, but all other
1921 * pages can be freed. We still want to
1922 * wakeup the parent waiting on the page,
1923 * though. ( also: pg_reqpage can be -1 and
1924 * not match anything ).
1926 * We have to wake specifically requested pages
1927 * up too because we cleared SWAPINPROG and
1928 * someone may be waiting for that.
1930 * NOTE: For reads, m->dirty will probably
1931 * be overridden by the original caller
1932 * of getpages so don't play cute tricks
1935 * NOTE: We can't actually free the page from
1936 * here, because this is an interrupt.
1937 * It is not legal to mess with
1938 * object->memq from an interrupt.
1939 * Deactivate the page instead.
1941 * WARNING! The instant SWAPINPROG is
1942 * cleared another cpu may start
1943 * using the mreq page (it will
1944 * check m->valid immediately).
1948 atomic_clear_int(&m
->busy_count
,
1952 * bio_driver_info holds the requested page
1955 if (i
!= (int)(intptr_t)bio
->bio_driver_info
) {
1956 vm_page_deactivate(m
);
1962 * If i == bp->b_pager.pg_reqpage, do not wake
1963 * the page up. The caller needs to.
1967 * If a write error occurs remove the swap
1968 * assignment (note that PG_SWAPPED may or
1969 * may not be set depending on prior activity).
1971 * Re-dirty OBJT_SWAP pages as there is no
1972 * other backing store, we can't throw the
1975 * Non-OBJT_SWAP pages (aka swapcache) must
1976 * not be dirtied since they may not have
1977 * been dirty in the first place, and they
1978 * do have backing store (the vnode).
1980 vm_page_busy_wait(m
, FALSE
, "swadpg");
1981 vm_object_hold(m
->object
);
1982 swp_pager_meta_ctl(m
->object
, m
->pindex
,
1984 vm_page_flag_clear(m
, PG_SWAPPED
);
1985 vm_object_drop(m
->object
);
1986 if (m
->object
->type
== OBJT_SWAP
) {
1988 vm_page_activate(m
);
1990 vm_page_io_finish(m
);
1991 atomic_clear_int(&m
->busy_count
,
1995 } else if (bio
->bio_caller_info1
.index
& SWBIO_READ
) {
1997 * NOTE: for reads, m->dirty will probably be
1998 * overridden by the original caller of getpages so
1999 * we cannot set them in order to free the underlying
2000 * swap in a low-swap situation. I don't think we'd
2001 * want to do that anyway, but it was an optimization
2002 * that existed in the old swapper for a time before
2003 * it got ripped out due to precisely this problem.
2005 * If not the requested page then deactivate it.
2007 * Note that the requested page, reqpage, is left
2008 * busied, but we still have to wake it up. The
2009 * other pages are released (unbusied) by
2010 * vm_page_wakeup(). We do not set reqpage's
2011 * valid bits here, it is up to the caller.
2015 * NOTE: Can't call pmap_clear_modify(m) from an
2016 * interrupt thread, the pmap code may have to
2017 * map non-kernel pmaps and currently asserts
2020 * WARNING! The instant SWAPINPROG is
2021 * cleared another cpu may start
2022 * using the mreq page (it will
2023 * check m->valid immediately).
2025 /*pmap_clear_modify(m);*/
2026 m
->valid
= VM_PAGE_BITS_ALL
;
2028 vm_page_flag_set(m
, PG_SWAPPED
);
2029 atomic_clear_int(&m
->busy_count
, PBUSY_SWAPINPROG
);
2032 * We have to wake specifically requested pages
2033 * up too because we cleared SWAPINPROG and
2034 * could be waiting for it in getpages. However,
2035 * be sure to not unbusy getpages specifically
2036 * requested page - getpages expects it to be
2039 * bio_driver_info holds the requested page
2041 if (i
!= (int)(intptr_t)bio
->bio_driver_info
) {
2042 vm_page_deactivate(m
);
2049 * Mark the page clean but do not mess with the
2050 * pmap-layer's modified state. That state should
2051 * also be clear since the caller protected the
2052 * page VM_PROT_READ, but allow the case.
2054 * We are in an interrupt, avoid pmap operations.
2056 * If we have a severe page deficit, deactivate the
2057 * page. Do not try to cache it (which would also
2058 * involve a pmap op), because the page might still
2061 * When using the swap to cache clean vnode pages
2062 * we do not mess with the page dirty bits.
2064 * NOTE! Nobody is waiting for the key mreq page
2065 * on write completion.
2067 vm_page_busy_wait(m
, FALSE
, "swadpg");
2068 if (m
->object
->type
== OBJT_SWAP
)
2070 vm_page_flag_set(m
, PG_SWAPPED
);
2071 atomic_clear_int(&m
->busy_count
, PBUSY_SWAPINPROG
);
2072 if (vm_paging_severe())
2073 vm_page_deactivate(m
);
2074 vm_page_io_finish(m
);
2075 if (bio
->bio_caller_info1
.index
& SWBIO_TTC
)
2076 vm_page_try_to_cache(m
);
2083 * adjust pip. NOTE: the original parent may still have its own
2084 * pip refs on the object.
2088 vm_object_pip_wakeup_n(object
, bp
->b_xio
.xio_npages
);
2091 * Release the physical I/O buffer.
2093 * NOTE: Due to synchronous operations in the write case b_cmd may
2094 * already be set to BUF_CMD_DONE and BIO_SYNC may have already
2097 * Use vm_token to interlock nsw_rcount/wcount wakeup?
2099 lwkt_gettoken(&vm_token
);
2100 if (bio
->bio_caller_info1
.index
& SWBIO_READ
)
2101 nswptr
= &nsw_rcount
;
2102 else if (bio
->bio_caller_info1
.index
& SWBIO_SYNC
)
2103 nswptr
= &nsw_wcount_sync
;
2105 nswptr
= &nsw_wcount_async
;
2106 bp
->b_cmd
= BUF_CMD_DONE
;
2107 relpbuf(bp
, nswptr
);
2108 lwkt_reltoken(&vm_token
);
2112 * Fault-in a potentially swapped page and remove the swap reference.
2113 * (used by swapoff code)
2115 * object must be held.
2117 static __inline
void
2118 swp_pager_fault_page(vm_object_t object
, int *sharedp
, vm_pindex_t pindex
)
2124 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object
));
2126 if (object
->type
== OBJT_VNODE
) {
2128 * Any swap related to a vnode is due to swapcache. We must
2129 * vget() the vnode in case it is not active (otherwise
2130 * vref() will panic). Calling vm_object_page_remove() will
2131 * ensure that any swap ref is removed interlocked with the
2132 * page. clean_only is set to TRUE so we don't throw away
2135 vp
= object
->handle
;
2136 error
= vget(vp
, LK_SHARED
| LK_RETRY
| LK_CANRECURSE
);
2138 vm_object_page_remove(object
, pindex
, pindex
+ 1, TRUE
);
2143 * Otherwise it is a normal OBJT_SWAP object and we can
2144 * fault the page in and remove the swap.
2146 m
= vm_fault_object_page(object
, IDX_TO_OFF(pindex
),
2148 VM_FAULT_DIRTY
| VM_FAULT_UNSWAP
,
2156 * This removes all swap blocks related to a particular device. We have
2157 * to be careful of ripups during the scan.
2159 static int swp_pager_swapoff_callback(struct swblock
*swap
, void *data
);
int
swap_pager_swapoff(int devidx)
{
	struct vm_object_hash *hash;
	struct swswapoffinfo info;
	struct vm_object marker;
	vm_object_t object;
	int n;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];

		lwkt_gettoken(&hash->token);
		TAILQ_INSERT_HEAD(&hash->list, &marker, object_entry);

		while ((object = TAILQ_NEXT(&marker, object_entry)) != NULL) {
			if (object->type == OBJT_MARKER)
				goto skip;
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
				goto skip;
			vm_object_hold(object);
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE) {
				vm_object_drop(object);
				goto skip;
			}

			/*
			 * Object is special in that we can't just pagein
			 * into vm_page's in it (tmpfs, vn).
			 */
			if ((object->flags & OBJ_NOPAGEIN) &&
			    RB_ROOT(&object->swblock_root)) {
				vm_object_drop(object);
				goto skip;
			}

			info.object = object;
			info.shared = 0;
			info.devidx = devidx;
			swblock_rb_tree_RB_SCAN(&object->swblock_root,
						NULL, swp_pager_swapoff_callback,
						&info);
			vm_object_drop(object);
skip:
			if (object == TAILQ_NEXT(&marker, object_entry)) {
				TAILQ_REMOVE(&hash->list, &marker,
					     object_entry);
				TAILQ_INSERT_AFTER(&hash->list, object,
						   &marker, object_entry);
			}
		}
		TAILQ_REMOVE(&hash->list, &marker, object_entry);
		lwkt_reltoken(&hash->token);
	}

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  If the
	 * user wants to retry the user can retry.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}
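
/*
 * Illustrative caller sketch (not part of this file): swap_pager_swapoff()
 * returns non-zero when in-use blocks remain on the device, so a swapoff
 * path could simply retry the scan.  The retry limit and error value below
 * are assumptions for the example, not taken from this code.
 *
 *	int
 *	example_swapoff_drain(int devidx)
 *	{
 *		int tries = 10;				(hypothetical limit)
 *
 *		while (swap_pager_swapoff(devidx) != 0) {
 *			if (--tries == 0)
 *				return (EBUSY);		(device still in use)
 *		}
 *		return (0);
 *	}
 */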
static int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			break;

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return (0);
}
/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	swblock structures.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
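
/*
 * Illustrative sketch of the lookup math used throughout this section
 * (derived from swp_pager_lookup() and swp_pager_meta_build() below; the
 * variable names are only for the example):
 *
 *	base   = pindex & ~(vm_pindex_t)SWAP_META_MASK;	RB tree key (swb_index)
 *	offset = pindex & SWAP_META_MASK;		slot in swb_pages[]
 *	blk    = swblock->swb_pages[offset];		SWAPBLK_NONE if unassigned
 */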
/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}
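
/*
 * Worked example (illustrative only; assumes SWAP_META_PAGES is 16 so
 * SWAP_META_MASK is 15): pindex 0x1005 and pindex 0x100f both resolve to
 * the swblock keyed at swb_index 0x1000, at swb_pages[] offsets 5 and 15
 * respectively.
 */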
/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}
/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	swblk_t v;
	int i;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
	swap = swp_pager_lookup(object, index);
	if (swap == NULL) {
		swap = zalloc(swap_zone);
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;
		++object->swblock_count;
		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}
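
/*
 * Hypothetical usage sketch: a pageout path that has just allocated a swap
 * block for a page could record it as follows.  swp_pager_getswapspace()
 * and its exact signature are assumptions here; only swp_pager_meta_build()
 * is taken from the code above.
 *
 *	vm_object_hold(object);
 *	blk = swp_pager_getswapspace(object, 1);	(assumed allocator)
 *	if (blk != SWAPBLK_NONE)
 *		swp_pager_meta_build(object, m->pindex, blk);
 *	vm_object_drop(object);
 *
 * Building over an index that already has a block simply frees the old
 * block and records the new one, as the while loop above shows.
 */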
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 * The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}
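
/*
 * Hypothetical usage sketch: when an object is truncated, a pager-level
 * caller can drop the swap backing an entire pindex range in one call
 * (the names 'base' and 'npages' are only for the example):
 *
 *	vm_object_hold(object);
 *	swp_pager_meta_free(object, base, npages);
 *	vm_object_drop(object);
 *
 * Because the implementation above uses an RB_SCAN, the cost scales with
 * the number of swblocks actually present, not with the size of the range.
 */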
/*
 * The caller must hold the object.
 */
static int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early
	 * if (swap) runs out of blocks and could be freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	return (0);
}
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	the object.
 *
 * NOTE: Decrement swb_count after the freeing operation (which
 *	 might block) to prevent racing destruction of the swblock.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
	}
	KKASSERT(object->swblock_count == 0);
}
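
/*
 * Hypothetical usage sketch: object teardown would typically call this
 * once, with the object held, after which no swap metadata remains:
 *
 *	vm_object_hold(object);
 *	swp_pager_meta_free_all(object);
 *	KKASSERT(object->swblock_count == 0);
 *	vm_object_drop(object);
 */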
/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 *
 * The caller must hold the object.
 */
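
/*
 * Illustrative sketch of the control modes documented above (the
 * surrounding locking and the use of the returned block are assumptions
 * for the example, not taken from this code):
 *
 *	swblk_t blk;
 *
 *	(peek only: metadata is left intact)
 *	blk = swp_pager_meta_ctl(object, pindex, 0);
 *
 *	(pop: remove the assignment but keep the swap block allocated,
 *	 e.g. so the caller can hand it to another object)
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);
 *
 *	(free: remove the assignment and return the block to the bitmap)
 *	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);
 */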
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return (SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);
	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
	}
	/* swap ptr may be invalid */
	return (r1);
}